Deployment With Ceph
Note
For other deployment options, select the appropriate Deployment with ... option from the Index page.
Deploy Ceph
We are going to install Ceph OSDs backed by loopback devices, which saves us from having to attach extra disks. If you have enough disks on the node, feel free to skip creating loopback devices by exporting CREATE_LOOPBACK_DEVICES_FOR_CEPH=false and exporting the block device names as the environment variables CEPH_OSD_DATA_DEVICE and CEPH_OSD_DB_WAL_DEVICE.
We are also going to separate the Ceph metadata and data onto different devices, to replicate the ideal scenario of fast disks for metadata and slow disks for data. You can change this to suit your design by referring to the documentation in ../openstack-helm-infra/ceph-osd/values.yaml.
This script will create two loopback devices for Ceph: one disk for OSD data and the other for the block DB and block WAL. If the default devices (loop0 and loop1) are busy in your case, feel free to change them by exporting the environment variables CEPH_OSD_DATA_DEVICE and CEPH_OSD_DB_WAL_DEVICE.
Note
If you are rerunning the script below, make sure to skip the loopback device creation by exporting CREATE_LOOPBACK_DEVICES_FOR_CEPH=false.
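For reference, the loopback helper essentially carves block devices out of sparse files. A minimal sketch of the idea, assuming hypothetical backing-file paths and sizes (the real logic lives in ./tools/deployment/common/setup-ceph-loopback-device.sh):
# Illustrative only: paths and sizes are assumptions, not the script's actual values
sudo mkdir -p /var/lib/openstack-helm/ceph
sudo truncate -s 10G /var/lib/openstack-helm/ceph/osd-data.img
sudo truncate -s 8G /var/lib/openstack-helm/ceph/osd-dbwal.img
# Bind the backing files to the loop devices the deploy script expects
sudo losetup /dev/loop0 /var/lib/openstack-helm/ceph/osd-data.img
sudo losetup /dev/loop1 /var/lib/openstack-helm/ceph/osd-dbwal.img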
#!/bin/bash
export CEPH_ENABLED=true
if [ "${CREATE_LOOPBACK_DEVICES_FOR_CEPH:=true}" == "true" ]; then
./tools/deployment/common/setup-ceph-loopback-device.sh --ceph-osd-data ${CEPH_OSD_DATA_DEVICE:=/dev/loop0} \
--ceph-osd-dbwal ${CEPH_OSD_DB_WAL_DEVICE:=/dev/loop1}
fi
#NOTE: Lint and package chart
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
  make -C ${HELM_CHART_ROOT_PATH} "${CHART}"
done
#NOTE: Deploy command
[ -s /tmp/ceph-fs-uuid.txt ] || uuidgen > /tmp/ceph-fs-uuid.txt
CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
#NOTE(portdirect): to use RBD devices with Ubuntu kernels < 4.5 this
# should be set to 'hammer'
. /etc/os-release
if [ "x${ID}" == "xcentos" ] || \
([ "x${ID}" == "xubuntu" ] && \
dpkg --compare-versions "$(uname -r)" "lt" "4.5"); then
CRUSH_TUNABLES=hammer
else
CRUSH_TUNABLES=null
fi
tee /tmp/ceph.yaml <<EOF
endpoints:
  ceph_mon:
    namespace: ceph
  ceph_mgr:
    namespace: ceph
network:
  public: 172.17.0.1/16
  cluster: 172.17.0.1/16
deployment:
  storage_secrets: true
  ceph: true
  rbd_provisioner: true
  cephfs_provisioner: true
  client_secrets: false
bootstrap:
  enabled: true
conf:
  ceph:
    global:
      fsid: ${CEPH_FS_ID}
      mon_addr: :6789
      osd_pool_default_size: 1
    osd:
      osd_crush_chooseleaf_type: 0
  pool:
    crush:
      tunables: ${CRUSH_TUNABLES}
    target:
      osd: 1
      pg_per_osd: 100
    default:
      crush_rule: same_host
    spec:
      # RBD pool
      - name: rbd
        application: rbd
        replication: 1
        percent_total_data: 40
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 1
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 1
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 1
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 1
        percent_total_data: 34.8
  storage:
    osd:
      - data:
          type: bluestore
          location: ${CEPH_OSD_DATA_DEVICE}
        block_db:
          location: ${CEPH_OSD_DB_WAL_DEVICE}
          size: "5GB"
        block_wal:
          location: ${CEPH_OSD_DB_WAL_DEVICE}
          size: "2GB"
pod:
  replicas:
    mds: 1
    mgr: 1
EOF
for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
  helm upgrade --install ${CHART} ${HELM_CHART_ROOT_PATH}/${CHART} \
    --namespace=ceph \
    --values=/tmp/ceph.yaml \
    ${OSH_EXTRA_HELM_ARGS} \
    ${OSH_EXTRA_HELM_ARGS_CEPH:-$(./tools/deployment/common/get-values-overrides.sh ${CHART})}
  #NOTE: Wait for deploy
  ./tools/deployment/common/wait-for-pods.sh ceph
  #NOTE: Validate deploy
  MON_POD=$(kubectl get pods \
    --namespace=ceph \
    --selector="application=ceph" \
    --selector="component=mon" \
    --no-headers | awk '{ print $1; exit }')
  kubectl exec -n ceph ${MON_POD} -- ceph -s
done
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/040-ceph.sh
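Beyond ceph -s, a couple of optional checks can confirm the OSD is up and the pools from /tmp/ceph.yaml exist. This sketch just reuses the MON_POD lookup from the script above:
MON_POD=$(kubectl get pods \
  --namespace=ceph \
  --selector="application=ceph" \
  --selector="component=mon" \
  --no-headers | awk '{ print $1; exit }')
# The single OSD should be reported as "up" and "in"
kubectl exec -n ceph ${MON_POD} -- ceph osd tree
# Pool names should match the conf.pool.spec entries above
kubectl exec -n ceph ${MON_POD} -- ceph osd pool ls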
Activate the OpenStack namespace to be able to use Ceph
#!/bin/bash
#NOTE: Get the over-rides to use
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE:="$(./tools/deployment/common/get-values-overrides.sh ceph-provisioners)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} ceph-provisioners
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
tee /tmp/ceph-openstack-config.yaml <<EOF
endpoints:
  ceph_mon:
    namespace: ceph
network:
  public: 172.17.0.1/16
  cluster: 172.17.0.1/16
deployment:
  ceph: false
  rbd_provisioner: false
  cephfs_provisioner: false
  client_secrets: true
bootstrap:
  enabled: false
EOF
helm upgrade --install ceph-openstack-config ${HELM_CHART_ROOT_PATH}/ceph-provisioners \
  --namespace=openstack \
  --values=/tmp/ceph-openstack-config.yaml \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
kubectl get -n openstack jobs
kubectl get -n openstack secrets
kubectl get -n openstack configmaps
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/045-ceph-ns-activate.sh
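After activation, the openstack namespace holds the Ceph client secrets, and PVCs created there can bind against the Ceph-backed storage class. A quick hedged check (the storage class name, typically general, depends on your values overrides):
kubectl get storageclasses
kubectl get -n openstack secrets | grep -i ceph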
Deploy MariaDB
#!/bin/bash
#NOTE: Get the over-rides to use
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_MARIADB:="$(./tools/deployment/common/get-values-overrides.sh mariadb)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} mariadb
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install mariadb ${HELM_CHART_ROOT_PATH}/mariadb \
  --namespace=openstack \
  --set pod.replicas.server=1 \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_MARIADB}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status mariadb
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/050-mariadb.sh
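Optionally, confirm the single server replica reached Ready before moving on (the application label follows the chart's conventions):
kubectl get pods -n openstack -l application=mariadb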
Deploy RabbitMQ
#!/bin/bash
#NOTE: Get the over-rides to use
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_RABBITMQ:="$(./tools/deployment/common/get-values-overrides.sh rabbitmq)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} rabbitmq
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install rabbitmq ${HELM_CHART_ROOT_PATH}/rabbitmq \
  --namespace=openstack \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_RABBITMQ}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status rabbitmq
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/060-rabbitmq.sh
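Optionally, confirm the broker formed its (single-node) cluster. This sketch assumes the chart's default statefulset naming; substitute the pod name reported by kubectl get pods:
kubectl exec -n openstack rabbitmq-rabbitmq-0 -- rabbitmqctl cluster_status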
Deploy Memcached
#!/bin/bash
#NOTE: Get the over-rides to use
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_MEMCACHED:="$(./tools/deployment/common/get-values-overrides.sh memcached)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} memcached
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install memcached ${HELM_CHART_ROOT_PATH}/memcached \
  --namespace=openstack \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_MEMCACHED}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status memcached
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/070-memcached.sh
Deploy Keystone
#!/bin/bash
#NOTE: Lint and package chart
make keystone
#NOTE: Get the over-rides to use
: ${OSH_EXTRA_HELM_ARGS_KEYSTONE:="$(./tools/deployment/common/get-values-overrides.sh keystone)"}
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install keystone ./keystone \
  --namespace=openstack \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_KEYSTONE}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status keystone
export OS_CLOUD=openstack_helm
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
openstack endpoint list
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/080-keystone.sh
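The openstack client above works because OS_CLOUD=openstack_helm resolves to a clouds.yaml entry written by the Keystone deployment tooling. For orientation, a minimal sketch of what such an entry typically looks like (the password and auth URL shown here are assumptions; check your own clouds.yaml):
clouds:
  openstack_helm:
    region_name: RegionOne
    identity_api_version: 3
    auth:
      username: admin
      password: password
      project_name: admin
      user_domain_name: default
      project_domain_name: default
      auth_url: http://keystone.openstack.svc.cluster.local/v3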
Deploy Heat
#!/bin/bash
: ${OSH_EXTRA_HELM_ARGS_HEAT:="$(./tools/deployment/common/get-values-overrides.sh heat)"}
#NOTE: Lint and package chart
make heat
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install heat ./heat \
  --namespace=openstack \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_HEAT}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
export OS_CLOUD=openstack_helm
openstack service list
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
openstack orchestration service list
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/090-heat.sh
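With the orchestration services up, a trivial smoke test is to list stacks; empty output on a fresh deployment is the expected result:
openstack stack list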
Deploy Horizon
Warning
Horizon deployment is not tested in the OSH development environment community gates.
#!/bin/bash
#NOTE: Get the over-rides to use
: ${OSH_EXTRA_HELM_ARGS_HORIZON:="$(./tools/deployment/common/get-values-overrides.sh horizon)"}
#NOTE: Lint and package chart
make horizon
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install horizon ./horizon \
  --namespace=openstack \
  --set network.node_port.enabled=true \
  --set network.node_port.port=31000 \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_HORIZON}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status horizon
# Delete the test pod if it still exists
kubectl delete pods -l application=horizon,release_group=horizon,component=test --namespace=openstack --ignore-not-found
helm test horizon
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/100-horizon.sh
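With the node port enabled above, the dashboard should answer on port 31000 of any cluster node. A hedged reachability check from the host:
NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
curl -sSf "http://${NODE_IP}:31000" -o /dev/null && echo "Horizon is up"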
Deploy Rados Gateway for object store
#!/bin/bash
#NOTE: Get the over-rides to use
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_CEPH_RGW:="$(./tools/deployment/common/get-values-overrides.sh ceph-rgw)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} ceph-rgw
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
tee /tmp/radosgw-openstack.yaml <<EOF
endpoints:
  identity:
    namespace: openstack
  object_store:
    namespace: openstack
  ceph_mon:
    namespace: ceph
network:
  public: 172.17.0.1/16
  cluster: 172.17.0.1/16
deployment:
  ceph: true
bootstrap:
  enabled: false
conf:
  rgw_ks:
    enabled: true
pod:
  replicas:
    rgw: 1
EOF
helm upgrade --install radosgw-openstack ${HELM_CHART_ROOT_PATH}/ceph-rgw \
  --namespace=openstack \
  --values=/tmp/radosgw-openstack.yaml \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_CEPH_RGW}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status radosgw-openstack
export OS_CLOUD=openstack_helm
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
openstack service list
openstack container create 'mygreatcontainer'
curl -L -o /tmp/important-file.jpg https://imgflip.com/s/meme/Cute-Cat.jpg
openstack object create --name 'superimportantfile.jpg' 'mygreatcontainer' /tmp/important-file.jpg
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/110-ceph-radosgateway.sh
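To confirm the upload round-trips through RadosGW, list the container and fetch the object back (reusing the names from the script above):
openstack object list mygreatcontainer
openstack object save mygreatcontainer superimportantfile.jpg --file /tmp/roundtrip.jpg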
Deploy Glance
#!/bin/bash
#NOTE: Lint and package chart
make glance
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
: ${OSH_OPENSTACK_RELEASE:="newton"}
#NOTE(portdirect): this could be: radosgw, rbd, swift or pvc
: ${GLANCE_BACKEND:="swift"}
#NOTE: Get the over-rides to use
: ${OSH_EXTRA_HELM_ARGS_GLANCE:="$(./tools/deployment/common/get-values-overrides.sh glance)"}
tee /tmp/glance.yaml <<EOF
storage: ${GLANCE_BACKEND}
EOF
if [ "x${OSH_OPENSTACK_RELEASE}" == "xnewton" ]; then
# NOTE(portdirect): glance APIv1 is required for heat in Newton
tee -a /tmp/glance.yaml <<EOF
conf:
glance:
DEFAULT:
enable_v1_api: true
enable_v2_registry: true
manifests:
deployment_registry: true
ingress_registry: true
pdb_registry: true
service_ingress_registry: true
service_registry: true
EOF
fi
helm upgrade --install glance ./glance \
  --namespace=openstack \
  --values=/tmp/glance.yaml \
  --set manifests.network_policy=true \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_GLANCE}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status glance
export OS_CLOUD=openstack_helm
openstack service list
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
openstack image list
openstack image show 'Cirros 0.3.5 64-bit'
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/120-glance.sh
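For an end-to-end check of the chosen backend, upload a fresh image and confirm it goes active (the image name and source URL here are illustrative):
curl -L -o /tmp/cirros.img http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
openstack image create --disk-format qcow2 --container-format bare \
  --file /tmp/cirros.img cirros-test
openstack image show cirros-test -f value -c status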
Deploy Cinder
Warning
Cinder deployment is not tested in the OSH development environment community gates.
#!/bin/bash
: ${OSH_EXTRA_HELM_ARGS_CINDER:="$(./tools/deployment/common/get-values-overrides.sh cinder)"}
#NOTE: Lint and package chart
make cinder
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
tee /tmp/cinder.yaml <<EOF
conf:
  ceph:
    pools:
      backup:
        replication: 1
        crush_rule: same_host
        chunk_size: 8
        app_name: cinder-backup
      cinder.volumes:
        replication: 1
        crush_rule: same_host
        chunk_size: 8
        app_name: cinder-volume
EOF
helm upgrade --install cinder ./cinder \
  --namespace=openstack \
  --values=/tmp/cinder.yaml \
  --set manifests.network_policy=true \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_CINDER}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
export OS_CLOUD=openstack_helm
openstack service list
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
openstack volume type list
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/130-cinder.sh
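For a hedged functional check, create a small volume against the Ceph pools defined above, verify it reaches available, and clean it up:
openstack volume create --size 1 test-volume
openstack volume show test-volume -f value -c status
openstack volume delete test-volume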
Deploy OpenvSwitch
#!/bin/bash
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_OPENVSWITCH:="$(./tools/deployment/common/get-values-overrides.sh openvswitch)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} openvswitch
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install openvswitch ${HELM_CHART_ROOT_PATH}/openvswitch \
  --namespace=openstack \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_OPENVSWITCH}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status openvswitch
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/140-openvswitch.sh
Deploy Libvirt
#!/bin/bash
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_LIBVIRT:="$(./tools/deployment/common/get-values-overrides.sh libvirt)"}
#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} libvirt
#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install libvirt ${HELM_CHART_ROOT_PATH}/libvirt \
  --namespace=openstack \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_LIBVIRT}
#NOTE(portdirect): We don't wait for libvirt pods to come up, as they depend
# on the neutron agents being up.
#NOTE: Validate Deployment info
helm status libvirt
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/150-libvirt.sh
Deploy Compute Kit (Nova and Neutron)
#!/bin/bash
export OSH_EXTRA_HELM_ARGS_NOVA="--set manifests.network_policy=true $(./tools/deployment/common/get-values-overrides.sh nova)"
# Deploy nova and neutron charts
./tools/deployment/component/compute-kit/compute-kit.sh
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/160-compute-kit.sh
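Once the compute kit settles, a reasonable verification is that the hypervisor and the network agents registered themselves:
export OS_CLOUD=openstack_helm
openstack compute service list
openstack network agent list
openstack hypervisor list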
Set up the gateway to the public network
#!/bin/bash
: ${OSH_EXT_SUBNET:="172.24.4.0/24"}
: ${OSH_BR_EX_ADDR:="172.24.4.1/24"}
sudo ip addr add ${OSH_BR_EX_ADDR} dev br-ex
sudo ip link set br-ex up
: ${DNSMASQ_IMAGE:=docker.io/openstackhelm/neutron:train-ubuntu_bionic}
# NOTE(portdirect): With Docker >= 1.13.1 the default FORWARD chain policy is
# configured to DROP, for the l3 agent to function as expected and for
# VMs to reach the outside world correctly this needs to be set to ACCEPT.
sudo iptables -P FORWARD ACCEPT
# Set up masquerading on the default-route device to the public subnet by
# searching for the interface with the default route; if multiple default
# routes exist, select the one with the lowest metric.
DEFAULT_ROUTE_DEV=$(route -n | awk '/^0.0.0.0/ { print $5 " " $NF }' | sort | awk '{ print $NF; exit }')
sudo iptables -t nat -A POSTROUTING -o ${DEFAULT_ROUTE_DEV} -s ${OSH_EXT_SUBNET} -j MASQUERADE
# NOTE(portdirect): Setup DNS for public endpoints
sudo docker run -d \
  --name br-ex-dns-server \
  --net host \
  --cap-add=NET_ADMIN \
  --volume /etc/kubernetes/kubelet-resolv.conf:/etc/kubernetes/kubelet-resolv.conf:ro \
  --entrypoint dnsmasq \
  ${DNSMASQ_IMAGE} \
  --keep-in-foreground \
  --no-hosts \
  --bind-interfaces \
  --resolv-file=/etc/kubernetes/kubelet-resolv.conf \
  --address="/svc.cluster.local/${OSH_BR_EX_ADDR%/*}" \
  --listen-address="${OSH_BR_EX_ADDR%/*}"
sleep 1
sudo docker top br-ex-dns-server
Alternatively, this step can be performed by running the script directly:
./tools/deployment/developer/ceph/170-setup-gateway.sh
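To confirm the gateway wiring, resolve a cluster service through the dnsmasq instance just started (it answers for any name under svc.cluster.local with the br-ex address) and check that the masquerade rule is in place. This sketch reuses the variables from the script above:
nslookup keystone.openstack.svc.cluster.local ${OSH_BR_EX_ADDR%/*}
sudo iptables -t nat -L POSTROUTING -n | grep "${OSH_EXT_SUBNET}"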