Deployment With NFS
Note
For other deployment options, select the appropriate Deployment with ... option from the Index page.
Deploy NFS Provisioner
#!/bin/bash
#NOTE: Deploy command
: ${OSH_INFRA_PATH:="../openstack-helm-infra"}
helm upgrade --install nfs-provisioner ${OSH_INFRA_PATH}/nfs-provisioner \
--namespace=nfs \
--set storageclass.name=general \
${OSH_EXTRA_HELM_ARGS_NFS_PROVISIONER}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh nfs
#NOTE: Display info
helm status nfs-provisioner
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/040-nfs-provisioner.sh
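Before moving on, it can be worth confirming that the provisioner registered the general storage class and that its pod is running. A minimal check, assuming kubectl is already pointed at the cluster:
kubectl get storageclass general
kubectl --namespace nfs get pods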
Deploy MariaDB
#!/bin/bash
#NOTE: Get the overrides to use
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_MARIADB:="$(./tools/deployment/common/get-values-overrides.sh mariadb)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} mariadb
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install mariadb ${HELM_CHART_ROOT_PATH}/mariadb \
--namespace=openstack \
--set pod.replicas.server=1 \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_MARIADB}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status mariadb
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/050-mariadb.sh
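MariaDB is the first chart in this guide that requests persistent storage, so this is a convenient point to confirm the NFS provisioner is actually binding volumes. A quick sketch, assuming kubectl access:
# The MariaDB PVC(s) should report STATUS "Bound" and STORAGECLASS "general"
kubectl --namespace openstack get pvc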
Deploy RabbitMQ
#!/bin/bash
#NOTE: Get the overrides to use
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_RABBITMQ:="$(./tools/deployment/common/get-values-overrides.sh rabbitmq)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} rabbitmq
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install rabbitmq ${HELM_CHART_ROOT_PATH}/rabbitmq \
--namespace=openstack \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_RABBITMQ}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status rabbitmq
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/060-rabbitmq.sh
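For a deeper check than helm status, you can ask RabbitMQ itself for its cluster state. The pod lookup below is only a sketch; it assumes the pod name contains rabbitmq, which is the usual naming for this chart:
# Pick the first rabbitmq pod and query its cluster status
RMQ_POD=$(kubectl --namespace openstack get pods -o name | grep rabbitmq | head -n1)
kubectl --namespace openstack exec ${RMQ_POD#pod/} -- rabbitmqctl cluster_status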
Deploy Memcached
#!/bin/bash
#NOTE: Get the overrides to use
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_MEMCACHED:="$(./tools/deployment/common/get-values-overrides.sh memcached)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} memcached
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install memcached ${HELM_CHART_ROOT_PATH}/memcached \
--namespace=openstack \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_MEMCACHED}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status memcached
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/070-memcached.sh
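A lightweight follow-up check is to confirm the Memcached service has endpoints behind it. This is a sketch only; the service name memcached is the chart default and may differ if overridden:
kubectl --namespace openstack get svc memcached
kubectl --namespace openstack get endpoints memcached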
Deploy Keystone
#!/bin/bash
#NOTE: Lint and package chart
make keystone
#NOTE: Get the overrides to use
: ${OSH_EXTRA_HELM_ARGS_KEYSTONE:="$(./tools/deployment/common/get-values-overrides.sh keystone)"}
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install keystone ./keystone \
--namespace=openstack \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_KEYSTONE}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status keystone
export OS_CLOUD=openstack_helm
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
openstack endpoint list
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/080-keystone.sh
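Beyond listing endpoints, requesting a token is a quick end-to-end check of the identity service. This assumes the openstack_helm cloud entry configured during the client setup steps earlier in this guide:
export OS_CLOUD=openstack_helm
openstack token issue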
Deploy Heat
#!/bin/bash
: ${OSH_EXTRA_HELM_ARGS_HEAT:="$(./tools/deployment/common/get-values-overrides.sh heat)"}
#NOTE: Lint and package chart
make heat
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install heat ./heat \
--namespace=openstack \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_HEAT}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
export OS_CLOUD=openstack_helm
openstack service list
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
openstack orchestration service list
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/090-heat.sh
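As an extra sanity check, you can ask the orchestration API for existing stacks; an empty list returned without errors shows that heat-api and its database wiring are working:
export OS_CLOUD=openstack_helm
openstack stack list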
Deploy Horizon
#!/bin/bash
#NOTE: Get the overrides to use
: ${OSH_EXTRA_HELM_ARGS_HORIZON:="$(./tools/deployment/common/get-values-overrides.sh horizon)"}
#NOTE: Lint and package chart
make horizon
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install horizon ./horizon \
--namespace=openstack \
--set network.node_port.enabled=true \
--set network.node_port.port=31000 \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_HORIZON}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status horizon
# Delete the test pod if it still exists
kubectl delete pods -l application=horizon,release_group=horizon,component=test --namespace=openstack --ignore-not-found
helm test horizon
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/100-horizon.sh
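Since the chart was installed with a node port of 31000, the dashboard should answer on any node address. The check below is a sketch; it assumes the first node's InternalIP is reachable from where you run it, and a 200 or a redirect to the login page indicates Horizon is serving:
NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
curl -s -o /dev/null -w "%{http_code}\n" http://${NODE_IP}:31000/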
Deploy Glance
#!/bin/bash
#NOTE: Lint and package chart
make glance
#NOTE: Get the overrides to use
: ${OSH_EXTRA_HELM_ARGS_GLANCE:="$(./tools/deployment/common/get-values-overrides.sh glance)"}
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
: ${OSH_OPENSTACK_RELEASE:="newton"}
: ${GLANCE_BACKEND:="pvc"}
tee /tmp/glance.yaml <<EOF
storage: ${GLANCE_BACKEND}
EOF
if [ "x${OSH_OPENSTACK_RELEASE}" == "xnewton" ]; then
# NOTE(portdirect): glance APIv1 is required for heat in Newton
tee -a /tmp/glance.yaml <<EOF
conf:
  glance:
    DEFAULT:
      enable_v1_api: true
      enable_v2_registry: true
manifests:
  deployment_registry: true
  ingress_registry: true
  pdb_registry: true
  service_ingress_registry: true
  service_registry: true
EOF
fi
helm upgrade --install glance ./glance \
--namespace=openstack \
--values=/tmp/glance.yaml \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_GLANCE}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status glance
export OS_CLOUD=openstack_helm
openstack service list
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
openstack image list
openstack image show 'Cirros 0.3.5 64-bit'
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/120-glance.sh
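To exercise the PVC-backed image store end to end, you can upload and then delete a throwaway image. This is only a smoke-test sketch; the file path and the image name osh-glance-smoke are arbitrary:
export OS_CLOUD=openstack_helm
dd if=/dev/zero of=/tmp/osh-smoke.img bs=1M count=1
openstack image create --disk-format raw --container-format bare --file /tmp/osh-smoke.img osh-glance-smoke
openstack image delete osh-glance-smoke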
Deploy OpenvSwitch
#!/bin/bash
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_OPENVSWITCH:="$(./tools/deployment/common/get-values-overrides.sh openvswitch)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} openvswitch
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install openvswitch ${HELM_CHART_ROOT_PATH}/openvswitch \
--namespace=openstack \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_OPENVSWITCH}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status openvswitch
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/140-openvswitch.sh
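To inspect the Open vSwitch configuration, you can run ovs-vsctl inside one of the vswitchd pods. The pod name pattern below is an assumption based on the chart's usual daemonset naming; at this stage the output may be nearly empty, since bridges are populated once Neutron is deployed:
OVS_POD=$(kubectl --namespace openstack get pods -o name | grep openvswitch-vswitchd | head -n1)
kubectl --namespace openstack exec ${OVS_POD#pod/} -- ovs-vsctl show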
Deploy Libvirt
#!/bin/bash
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_LIBVIRT:="$(./tools/deployment/common/get-values-overrides.sh libvirt)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} libvirt
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install libvirt ${HELM_CHART_ROOT_PATH}/libvirt \
--namespace=openstack \
--set conf.ceph.enabled=false \
${OSH_EXTRA_HELM_ARGS} \
${OSH_EXTRA_HELM_ARGS_LIBVIRT}
#NOTE(portdirect): We don't wait for libvirt pods to come up, as they depend
# on the neutron agents being up.
#NOTE: Validate Deployment info
helm status libvirt
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/150-libvirt.sh
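Because the libvirt pods deliberately wait for the Neutron agents, it is normal for them to sit in an Init state until the compute kit in the next step is deployed. Listing them is still useful to confirm the daemonset was created; a minimal sketch:
kubectl --namespace openstack get pods | grep libvirt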
Deploy Compute Kit (Nova and Neutron)
#!/bin/bash
# Deploy nova and neutron charts
./tools/deployment/component/compute-kit/compute-kit.sh
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/160-compute-kit.sh
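Once the compute-kit script completes, the usual way to confirm Nova and Neutron are healthy is to list their registered services and agents:
export OS_CLOUD=openstack_helm
openstack compute service list
openstack network agent list
openstack hypervisor list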
Set up the gateway to the public network
#!/bin/bash
: ${OSH_EXT_SUBNET:="172.24.4.0/24"}
: ${OSH_BR_EX_ADDR:="172.24.4.1/24"}
sudo ip addr add ${OSH_BR_EX_ADDR} dev br-ex
sudo ip link set br-ex up
: ${DNSMASQ_IMAGE:=docker.io/openstackhelm/neutron:train-ubuntu_bionic}
# NOTE(portdirect): With Docker >= 1.13.1 the default FORWARD chain policy is
# configured to DROP, for the l3 agent to function as expected and for
# VMs to reach the outside world correctly this needs to be set to ACCEPT.
sudo iptables -P FORWARD ACCEPT
# Setup masquerading on default route dev to public subnet by searching for the
# interface with default routing, if multiple default routes exist then select
# the one with the lowest metric.
DEFAULT_ROUTE_DEV=$(route -n | awk '/^0.0.0.0/ { print $5 " " $NF }' | sort | awk '{ print $NF; exit }')
sudo iptables -t nat -A POSTROUTING -o ${DEFAULT_ROUTE_DEV} -s ${OSH_EXT_SUBNET} -j MASQUERADE
# NOTE(portdirect): Setup DNS for public endpoints
sudo docker run -d \
--name br-ex-dns-server \
--net host \
--cap-add=NET_ADMIN \
--volume /etc/kubernetes/kubelet-resolv.conf:/etc/kubernetes/kubelet-resolv.conf:ro \
--entrypoint dnsmasq \
${DNSMASQ_IMAGE} \
--keep-in-foreground \
--no-hosts \
--bind-interfaces \
--resolv-file=/etc/kubernetes/kubelet-resolv.conf \
--address="/svc.cluster.local/${OSH_BR_EX_ADDR%/*}" \
--listen-address="${OSH_BR_EX_ADDR%/*}"
sleep 1
sudo docker top br-ex-dns-server
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/nfs/170-setup-gateway.sh
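To verify the gateway, check that br-ex carries the address, that the masquerade rule is in place, and that the dnsmasq container answers for svc.cluster.local names. The addresses below are the defaults from the script above; adjust them if you overrode OSH_BR_EX_ADDR or OSH_EXT_SUBNET:
ip addr show br-ex
sudo iptables -t nat -S POSTROUTING | grep 172.24.4.0/24
nslookup keystone.openstack.svc.cluster.local 172.24.4.1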