[SEBA-83]
Create charts/docs for Persistent Storage
- Ceph (block volumes, and shared filesystem) using Rook
- Local mounted volumes with local-provisioner
- Local directories with local-directory
Change-Id: I65e8a55ca4fbdb6c9754beec6b7ce5ea010ad642
diff --git a/.gitignore b/.gitignore
index 54a5d81..8016ce9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,4 @@
voltha/charts
xos-core/charts
xos-profiles/*/charts
+storage/*/charts
diff --git a/local-persistent-volume/templates/storage-class.yaml b/examples/registry-cephfs.yaml
similarity index 61%
copy from local-persistent-volume/templates/storage-class.yaml
copy to examples/registry-cephfs.yaml
index 32cea67..283ee1a 100644
--- a/local-persistent-volume/templates/storage-class.yaml
+++ b/examples/registry-cephfs.yaml
@@ -1,5 +1,5 @@
---
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,10 +13,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
- name: {{ .Values.storageClassName }}
-provisioner: kubernetes.io/no-provisioner
-volumeBindingMode: WaitForFirstConsumer
+# Values file to implement a docker registry using the cord-cephfs StorageClass
+
+service:
+ type: NodePort
+ nodePort: 30500
+
+persistence:
+ enabled: true
+ storageClass: "cord-cephfs"
+ accessMode: ReadWriteMany
+
+replicaCount: 2
+
+resources:
+ limits:
+ cpu: 100m
+ memory: 100Mi
diff --git a/local-persistent-volume/Chart.yaml b/examples/xos-db-ceph-rbd.yaml
similarity index 82%
copy from local-persistent-volume/Chart.yaml
copy to examples/xos-db-ceph-rbd.yaml
index bacf6f2..0a78154 100644
--- a/local-persistent-volume/Chart.yaml
+++ b/examples/xos-db-ceph-rbd.yaml
@@ -13,5 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-name: local-persistent-volume
-version: 1.0.0
+# Use a Ceph RBD volume to store the XOS database
+
+xos-db:
+ needDBPersistence: true
+ storageClassName: cord-ceph-rbd
diff --git a/local-persistent-volume/Chart.yaml b/examples/xos-db-local-dir.yaml
similarity index 75%
copy from local-persistent-volume/Chart.yaml
copy to examples/xos-db-local-dir.yaml
index bacf6f2..ab2e0da 100644
--- a/local-persistent-volume/Chart.yaml
+++ b/examples/xos-db-local-dir.yaml
@@ -1,5 +1,5 @@
---
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,5 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-name: local-persistent-volume
-version: 1.0.0
+# Use a local directory to store the XOS database
+
+xos-db:
+ needDBPersistence: true
+ storageClassName: local-directory
diff --git a/local-persistent-volume/templates/persistent-volume.yaml b/local-persistent-volume/templates/persistent-volume.yaml
deleted file mode 100644
index c012f6f..0000000
--- a/local-persistent-volume/templates/persistent-volume.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: {{ .Values.persistentVolumeName }}
-spec:
- capacity:
- storage: {{ .Values.pvStorageCapacity }}
- accessModes:
- - ReadWriteOnce
- persistentVolumeReclaimPolicy: Retain
- storageClassName: {{ .Values.storageClassName }}
- local:
- path: {{ .Values.hostLocalPath }}
- nodeAffinity:
- required:
- nodeSelectorTerms:
- - matchExpressions:
- - key: kubernetes.io/hostname
- operator: In
- values:
- - {{ .Values.volumeHostName }}
-
diff --git a/local-persistent-volume/values.yaml b/local-persistent-volume/values.yaml
deleted file mode 100644
index 5c24c03..0000000
--- a/local-persistent-volume/values.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-storageClassName: db-local-storage
-persistentVolumeName: db-pv
-pvClaimName: db-pv-claim
-volumeHostName: k8s-01
-hostLocalPath: /var/local/vol1
-pvStorageCapacity: 10Gi
\ No newline at end of file
diff --git a/storage/README.md b/storage/README.md
new file mode 100644
index 0000000..3f2a426
--- /dev/null
+++ b/storage/README.md
@@ -0,0 +1,335 @@
+# CORD Storage charts
+
+These charts implement persistent storage for use within Kubernetes.
+
+See the Kubernetes documentation for background material on how persistent
+storage works:
+
+- [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/)
+- [PersistentVolume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
+
+Using persistent storage is optional during development, but it should be
+provisioned and configured for production and realistic testing scenarios.
+
+## Local Directory
+
+The `local-directory` chart creates
+[local](https://kubernetes.io/docs/concepts/storage/volumes/#local) volumes on
+specific nodes, out of directories. As volume size limits are not enforced and
+the node names are preconfigured, this chart is intended only for development
+and testing.
+
+Multiple directories can be specified in the `volumes` list in the chart's
+`values.yaml` file.
+
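+For instance, the default `values.yaml` defines two volumes backed by
+directories on a node named `node1`:
+
+```yaml
+storageClassName: local-directory
+
+volumes:
+  - name: "small-pv"
+    size: "2Gi"
+    host: "node1"
+    directory: "/var/kubernetes_local_directories/small-pv"
+  - name: "large-pv"
+    size: "10Gi"
+    host: "node1"
+    directory: "/var/kubernetes_local_directories/large-pv"
+```
+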
+The `StorageClass` created for all volumes is `local-directory`.
+
+There is an Ansible playbook that automates the creation of these directories
+on all of the Kubernetes nodes. Make sure that the inventory name in Ansible
+matches the one given as `host` in the `volumes` list, then invoke it with:
+
+```shell
+ansible-playbook -i <path to ansible inventory> --extra-vars "helm_values_file=<path to values.yaml>" local-directory-playbook.yaml
+```
+
+## Local Provisioner
+
+The `local-provisioner` chart provides a
+[local](https://kubernetes.io/docs/concepts/storage/volumes/#local),
+non-distributed `PersistentVolume` that is usable on one specific node. It
+does this by running the k8s [external storage local volume
+provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume/helm/provisioner).
+
+This type of storage is useful for workloads that have their own intrinsic HA
+or redundancy strategies, and only need independent local storage on each of
+several nodes.
+
+This provisioner is not "dynamic" in the sense that it can't create a new
+`PersistentVolume` on demand from a storage pool, but it does automatically
+create `PersistentVolumes` as disks/partitions are mounted on the nodes.
+
+To create a new PV, a disk or partition on a node has to be formatted and
+mounted in specific locations, after which the provisioner will automatically
+create a `PersistentVolume` for the mount. As these volumes can't be split or
+resized, care must be taken to ensure that the correct quantity, types, and
+sizes of mounts are created for all the `PersistentVolumeClaims` that a
+specific workload needs to bind.
+
+By default, two `StorageClasses` are created to differentiate between hard
+disks and SSDs:
+
+- `local-hdd`, which offers PVs on volumes mounted under `/mnt/local-storage/hdd/*`
+- `local-ssd`, which offers PVs on volumes mounted under `/mnt/local-storage/ssd/*`
+
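+As a minimal sketch, a workload could then request one of these volumes with a
+`PersistentVolumeClaim` such as the following (the claim name and size are
+illustrative; the claim binds to one of the pre-created volumes of at least the
+requested size once a pod consumes it):
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: example-local-hdd-claim  # illustrative name
+spec:
+  storageClassName: local-hdd    # or local-ssd
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi              # must fit within an available mount
+```
+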
+### Adding a new local volume on a node
+
+If you wanted to add a new volume to a node, you'd physically install a new disk
+in the system, then determine the device file it uses. Assuming that it's a
+hard disk and the device file is `/dev/sdb`, you might partition, format, and
+mount the disk like this:
+
+```shell
+$ sudo parted -s /dev/sdb \
+ mklabel gpt \
+ mkpart primary ext4 1MiB 100%
+$ sudo mkfs.ext4 /dev/sdb1
+$ sudo mkdir -p /mnt/local-storage/hdd/sdb1
+$ echo "/dev/sdb1 /mnt/local-storage/hdd/sdb1 ext4 defaults 0 0" | sudo tee -a /etc/fstab
+$ sudo mount /mnt/local-storage/hdd/sdb1
+```
+
+Then check that the `PersistentVolume` is created by the `local-provisioner`:
+
+```shell
+$ kubectl get pv
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
+local-pv-2bfa2c43 19Gi RWO Delete Available local-hdd 6h
+
+$ kubectl describe pv local-pv-2bfa2c43
+Name: local-pv-2bfa2c43
+Labels: <none>
+Annotations: pv.kubernetes.io/provisioned-by=local-volume-provisioner-node1-...
+Finalizers: [kubernetes.io/pv-protection]
+StorageClass: local-hdd
+Status: Available
+Claim:
+Reclaim Policy: Delete
+Access Modes: RWO
+Capacity: 19Gi
+Node Affinity:
+ Required Terms:
+ Term 0: kubernetes.io/hostname in [node1]
+Message:
+Source:
+ Type: LocalVolume (a persistent volume backed by local storage on a node)
+ Path: /mnt/local-storage/hdd/sdb1
+Events: <none>
+```
+
+## Ceph deployed with Rook
+
+[Rook](https://rook.github.io/) provides an abstraction layer for Ceph and
+other distributed persistent data storage systems.
+
+There are 3 Rook charts included with CORD:
+
+- `rook-operator`, which runs the volume provisioning portion of Rook (and is a
+  thin wrapper around the upstream [rook-ceph
+  chart](https://rook.github.io/docs/rook/v0.8/helm-operator.html))
+- `rook-cluster`, which defines the Ceph cluster and creates these
+  `StorageClass` objects usable by other charts:
+  - `cord-ceph-rbd`, which dynamically creates `PersistentVolumes` when a
+    `PersistentVolumeClaim` is created. These volumes are only usable by a
+    single container at a time.
+  - `cord-cephfs`, a single shared filesystem which is mountable
+    `ReadWriteMany` on multiple containers via `PersistentVolumeClaim`. Its
+    size is predetermined.
+- `rook-tools`, which provides a toolbox container for troubleshooting problems
+ with Rook/Ceph
+
+To create persistent volumes, you will need to load the first two charts; the
+third is only needed for troubleshooting and diagnostics.
+
+### Rook Node Prerequisites
+
+By default, all the nodes running k8s are expected to have a directory named
+`/mnt/ceph` where the Ceph data is stored (the `cephDataDir` variable can be
+used to change this path).
+
+In a production deployment, this would ideally be located on its own block
+storage device.
+
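+If a dedicated mount is used, the Ceph data path can be overridden by passing a
+small values file to `helm install -f <file> ...` when loading the
+`rook-cluster` chart; the path below is only an example:
+
+```yaml
+# example override; point Ceph data at a dedicated mount
+cephDataDir: "/srv/ceph"
+```
+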
+There should be at least 3 nodes with storage available to provide data
+redundancy.
+
+### Loading Rook Charts
+
+First, add the `rook-beta` repo to helm, then load the `rook-operator` chart
+into the `rook-ceph-system` namespace:
+
+```shell
+cd helm-charts/storage
+helm repo add rook-beta https://charts.rook.io/beta
+helm dep update rook-operator
+helm install --namespace rook-ceph-system -n rook-operator rook-operator
+```
+
+Check that it's running (it will start the `rook-ceph-agent` and
+`rook-discover` DaemonSets):
+
+```shell
+$ kubectl -n rook-ceph-system get pods
+NAME READY STATUS RESTARTS AGE
+rook-ceph-agent-4c66b 1/1 Running 0 6m
+rook-ceph-agent-dsdsr 1/1 Running 0 6m
+rook-ceph-agent-gwjlk 1/1 Running 0 6m
+rook-ceph-operator-687b7bb6ff-vzjsl 1/1 Running 0 7m
+rook-discover-9f87r 1/1 Running 0 6m
+rook-discover-lmhz9 1/1 Running 0 6m
+rook-discover-mxsr5 1/1 Running 0 6m
+```
+
+Next, load the `rook-cluster` chart, which connects the storage on the nodes to
+the Ceph pool and creates the CephFS filesystem:
+
+```shell
+helm install -n rook-cluster rook-cluster
+```
+
+Check that the cluster is running - this may take a few minutes. Look for the
+`rook-ceph-mds-*` containers to start:
+
+```shell
+$ kubectl -n rook-ceph get pods
+NAME READY STATUS RESTARTS AGE
+rook-ceph-mds-cord-ceph-filesystem-7564b648cf-4wxzn 1/1 Running 0 1m
+rook-ceph-mds-cord-ceph-filesystem-7564b648cf-rcvnx 1/1 Running 0 1m
+rook-ceph-mgr-a-75654fb698-zqj67 1/1 Running 0 5m
+rook-ceph-mon0-v9d2t 1/1 Running 0 5m
+rook-ceph-mon1-4sxgc 1/1 Running 0 5m
+rook-ceph-mon2-6b6pj 1/1 Running 0 5m
+rook-ceph-osd-id-0-85d887f76c-44w9d 1/1 Running 0 4m
+rook-ceph-osd-id-1-866fb5c684-lmxfp 1/1 Running 0 4m
+rook-ceph-osd-id-2-557dd69c5c-qdnmb 1/1 Running 0 4m
+rook-ceph-osd-prepare-node1-bfzzm 0/1 Completed 0 4m
+rook-ceph-osd-prepare-node2-dt4gx 0/1 Completed 0 4m
+rook-ceph-osd-prepare-node3-t5fnn 0/1 Completed 0 4m
+
+$ kubectl -n rook-ceph get storageclass
+NAME PROVISIONER AGE
+cord-ceph-rbd ceph.rook.io/block 6m
+cord-cephfs kubernetes.io/no-provisioner 6m
+
+$ kubectl -n rook-ceph get filesystems
+NAME AGE
+cord-ceph-filesystem 6m
+
+$ kubectl -n rook-ceph get pools
+NAME AGE
+cord-ceph-pool 6m
+
+$ kubectl -n rook-ceph get persistentvolume
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
+cord-cephfs-pv 20Gi RWX Retain Available cord-cephfs 7m
+```
+
+At this point you can create a `PersistentVolumeClaim` on `cord-ceph-rbd`, and
+the `rook-ceph-operator`, acting as a volume provisioner, will create a
+corresponding `PersistentVolume` and bind it to the PVC.
+
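+A minimal sketch of such a claim (the claim name and size are illustrative):
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: example-rbd-claim   # illustrative name
+spec:
+  storageClassName: cord-ceph-rbd
+  accessModes:
+    - ReadWriteOnce         # RBD volumes are usable by one container at a time
+  resources:
+    requests:
+      storage: 5Gi
+```
+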
+Creating a `PersistentVolumeClaim` on `cord-cephfs` will mount the same CephFS
+filesystem on every container that requests it. The CephFS PV implementation
+currently isn't as mature as the Ceph RBD volumes, and may not remount properly
+when used with a PVC.
+
+### Troubleshooting Rook
+
+Checking the `rook-ceph-operator` logs can be enlightening:
+
+```shell
+kubectl -n rook-ceph-system logs -f rook-ceph-operator-...
+```
+
+The [Rook toolbox container](https://rook.io/docs/rook/v0.8/toolbox.html) has
+been packaged as the `rook-tools` chart, and provides a variety of tools for
+debugging Rook and Ceph.
+
+Load the `rook-tools` chart:
+
+```shell
+helm install -n rook-tools rook-tools
+```
+
+Once the container is running (check with `kubectl -n rook-ceph get pods`),
+exec into it to get a shell with access to all the tools:
+
+```shell
+kubectl -n rook-ceph exec -it rook-ceph-tools bash
+```
+
+or run a one-off command:
+
+```shell
+kubectl -n rook-ceph exec rook-ceph-tools -- ceph status
+```
+
+or mount the CephFS volume:
+
+```shell
+kubectl -n rook-ceph exec -it rook-ceph-tools bash
+mkdir /mnt/cephfs
+mon_endpoints=$(grep mon_host /etc/ceph/ceph.conf | awk '{print $3}')
+my_secret=$(grep key /etc/ceph/keyring | awk '{print $3}')
+mount -t ceph -o name=admin,secret=$my_secret $mon_endpoints:/ /mnt/cephfs
+ls /mnt/cephfs
+```
+
+### Cleaning up after Rook
+
+The `rook-operator` chart will leave a few `DaemonSets` behind after it's
+removed. Clean these up with the following commands:
+
+```shell
+kubectl -n rook-ceph-system delete daemonset rook-ceph-agent
+kubectl -n rook-ceph-system delete daemonset rook-discover
+helm delete --purge rook-operator
+```
+
+If you have other charts that create `PersistentVolumeClaims`, you may need to
+clean them up manually (for example, if you've changed the `StorageClass` they
+use). List them with:
+
+```shell
+kubectl get pvc --all-namespaces
+```
+
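+Stale claims can then be deleted individually, for example:
+
+```shell
+kubectl -n <namespace> delete pvc <claim name>
+```
+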
+Files may be left behind in the Ceph storage directory and/or the Rook
+configuration directory, and these need to be deleted before reinstalling the
+`rook-*` charts. If you've used the `automation-tools/kubespray-installer`
+scripts to set up an environment named `test`, you can delete all of these
+files with the following commands:
+
+```shell
+cd cord/automation-tools/kubespray-installer
+ansible -i inventories/test/inventory.cfg -b -m shell -a "rm -rf /var/lib/rook && rm -rf /mnt/ceph/*" all
+```
+
+The current upgrade process for Rook involves manual intervention and
+inspection using the tools container.
+
+## Using Persistent Storage
+
+The general process for using persistent storage is to create a
+[PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+on the appropriate
+[StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/)
+for the workload you're trying to run.
+
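+A workload references the claim by name in its pod spec (the charts in the
+examples below do this through their values files). A minimal sketch of a pod
+mounting the claim from the earlier Ceph RBD example (pod name, image, and
+mount path are illustrative):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: example-pod          # illustrative name
+spec:
+  containers:
+    - name: app
+      image: nginx           # illustrative image
+      volumeMounts:
+        - name: data
+          mountPath: /data   # where the volume appears in the container
+  volumes:
+    - name: data
+      persistentVolumeClaim:
+        claimName: example-rbd-claim   # the PVC created earlier
+```
+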
+### Example: XOS Database on a local directory
+
+For development and testing, it may be useful to persist the XOS database to a
+local directory on one of the nodes:
+
+```shell
+helm install -f examples/xos-db-local-dir.yaml -n xos-core xos-core
+```
+
+### Example: XOS Database on a Ceph RBD volume
+
+The XOS database (Postgres) needs a volume that persists if a node goes down or
+is taken out of service and that is not shared with other containers running
+Postgres, so a Ceph RBD volume is a reasonable choice for it.
+
+```shell
+helm install -f examples/xos-db-ceph-rbd.yaml -n xos-core xos-core
+```
+
+### Example: Docker Registry on CephFS shared filesystem
+
+The Docker Registry wants a filesystem that is shared across all of its
+containers, so it's a suitable workload for the `cord-cephfs` shared
+filesystem.
+
+There's an example values file available in `helm-charts/examples/registry-cephfs.yaml`:
+
+```shell
+helm install -f examples/registry-cephfs.yaml -n docker-registry stable/docker-registry
+```
+
diff --git a/local-persistent-volume/Chart.yaml b/storage/local-directory/Chart.yaml
similarity index 84%
rename from local-persistent-volume/Chart.yaml
rename to storage/local-directory/Chart.yaml
index bacf6f2..d343bf9 100644
--- a/local-persistent-volume/Chart.yaml
+++ b/storage/local-directory/Chart.yaml
@@ -1,5 +1,5 @@
---
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,5 +13,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-name: local-persistent-volume
-version: 1.0.0
+name: local-directory
+version: 0.1.0-dev0
diff --git a/storage/local-directory/local-directory-playbook.yaml b/storage/local-directory/local-directory-playbook.yaml
new file mode 100644
index 0000000..0c03add
--- /dev/null
+++ b/storage/local-directory/local-directory-playbook.yaml
@@ -0,0 +1,41 @@
+---
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This playbook will create directories needed by the local-directories helm
+# chart that will be turned into k8s PersistentVolumes
+#
+# For dev use only - use real volumes/disks in production
+
+- hosts: all
+ become: yes
+
+ vars:
+ helm_values_file: ./values.yaml
+
+ tasks:
+ - name: Read yaml from helm_values_file
+ set_fact:
+ helm_values: "{{ lookup('file', helm_values_file) | from_yaml }}"
+
+ - name: Create directories on nodes
+ when: item.host == ansible_hostname
+ file:
+ path: "{{ item.directory }}"
+ state: directory
+ owner: root
+ group: root
+ mode: 0755
+ with_items: "{{ helm_values.volumes }}"
+
diff --git a/storage/local-directory/templates/NOTES.txt b/storage/local-directory/templates/NOTES.txt
new file mode 100644
index 0000000..7cf053a
--- /dev/null
+++ b/storage/local-directory/templates/NOTES.txt
@@ -0,0 +1,9 @@
+A StorageClass was created: {{ .Values.storageClassName }}
+
+The following PersistentVolumes were created using directories on these nodes:
+
+# PV Name, Host, Size, Host Directory
+{{- range $volume := .Values.volumes }}
+{{ $volume.name }}, {{ $volume.host}}, {{ $volume.size }}, {{ $volume.directory }}
+{{- end }}
+
diff --git a/storage/local-directory/templates/localdirs.yaml b/storage/local-directory/templates/localdirs.yaml
new file mode 100644
index 0000000..04a9f51
--- /dev/null
+++ b/storage/local-directory/templates/localdirs.yaml
@@ -0,0 +1,46 @@
+---
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: {{ .Values.storageClassName }}
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+
+{{- range $volume := .Values.volumes }}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{ $volume.name }}
+spec:
+ capacity:
+ storage: {{ $volume.size }}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: {{ $.Values.storageClassName }}
+ local:
+ path: {{ $volume.directory }}
+ nodeAffinity:
+ required:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - {{ $volume.host }}
+{{- end }}
diff --git a/local-persistent-volume/templates/storage-class.yaml b/storage/local-directory/values.yaml
similarity index 62%
rename from local-persistent-volume/templates/storage-class.yaml
rename to storage/local-directory/values.yaml
index 32cea67..fbafd9b 100644
--- a/local-persistent-volume/templates/storage-class.yaml
+++ b/storage/local-directory/values.yaml
@@ -1,5 +1,5 @@
---
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,10 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
- name: {{ .Values.storageClassName }}
-provisioner: kubernetes.io/no-provisioner
-volumeBindingMode: WaitForFirstConsumer
+storageClassName: local-directory
+
+volumes:
+ - name: "small-pv"
+ size: "2Gi"
+ host: "node1"
+ directory: "/var/kubernetes_local_directories/small-pv"
+ - name: "large-pv"
+ size: "10Gi"
+ host: "node1"
+ directory: "/var/kubernetes_local_directories/large-pv"
diff --git a/storage/local-provisioner/Chart.yaml b/storage/local-provisioner/Chart.yaml
new file mode 100644
index 0000000..d99ade5
--- /dev/null
+++ b/storage/local-provisioner/Chart.yaml
@@ -0,0 +1,29 @@
+---
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# NOTE: this Apache v2 licensed code originally came from:
+# https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume/helm/provisioner
+# at commit: fc25fd337b15a413b1162f76b59b2cac7ff39126
+#
+# Minimal changes have been applied to align it with CORD/SEBA
+
+apiVersion: v1
+version: 2.0.0
+description: local provisioner chart
+name: local-provisioner
+keywords:
+ - storage
+ - local
+engine: gotpl
diff --git a/local-persistent-volume/Chart.yaml b/storage/local-provisioner/provisioner-namespace.yaml
similarity index 75%
copy from local-persistent-volume/Chart.yaml
copy to storage/local-provisioner/provisioner-namespace.yaml
index bacf6f2..5923b16 100644
--- a/local-persistent-volume/Chart.yaml
+++ b/storage/local-provisioner/provisioner-namespace.yaml
@@ -1,5 +1,5 @@
---
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,5 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-name: local-persistent-volume
-version: 1.0.0
+{{- if .Values.common.rbac }}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Values.common.namespace }}
+{{- end }}
diff --git a/storage/local-provisioner/templates/00_provisioner_rbac.yaml b/storage/local-provisioner/templates/00_provisioner_rbac.yaml
new file mode 100644
index 0000000..7ddedd9
--- /dev/null
+++ b/storage/local-provisioner/templates/00_provisioner_rbac.yaml
@@ -0,0 +1,101 @@
+---
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{- if .Values.common.rbac }}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Values.common.namespace }}
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Values.daemonset.serviceAccount }}
+ namespace: {{ .Values.common.namespace }}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: local-storage-provisioner-pv-binding
+ namespace: {{ .Values.common.namespace }}
+subjects:
+- kind: ServiceAccount
+ name: {{ .Values.daemonset.serviceAccount }}
+ namespace: {{ .Values.common.namespace }}
+roleRef:
+ kind: ClusterRole
+ name: system:persistent-volume-provisioner
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: local-storage-provisioner-node-clusterrole
+ namespace: {{ .Values.common.namespace }}
+rules:
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get"]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: local-storage-provisioner-node-binding
+ namespace: {{ .Values.common.namespace }}
+subjects:
+- kind: ServiceAccount
+ name: {{ .Values.daemonset.serviceAccount }}
+ namespace: {{ .Values.common.namespace }}
+roleRef:
+ kind: ClusterRole
+ name: local-storage-provisioner-node-clusterrole
+ apiGroup: rbac.authorization.k8s.io
+
+{{- if .Values.common.useJobForCleaning }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: local-storage-provisioner-jobs-role
+ namespace: {{ .Values.common.namespace }}
+rules:
+- apiGroups:
+ - 'batch'
+ resources:
+ - jobs
+ verbs:
+ - '*'
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: local-storage-provisioner-jobs-rolebinding
+ namespace: {{ .Values.common.namespace }}
+subjects:
+- kind: ServiceAccount
+ name: {{ .Values.daemonset.serviceAccount }}
+ namespace: {{ .Values.common.namespace }}
+roleRef:
+ kind: Role
+  name: local-storage-provisioner-jobs-role
+ apiGroup: rbac.authorization.k8s.io
+
+{{- end }} # if .Values.common.useJobForCleaning
+{{- end }} # if .Values.common.rbac
diff --git a/storage/local-provisioner/templates/provisioner.yaml b/storage/local-provisioner/templates/provisioner.yaml
new file mode 100644
index 0000000..1e15e3d
--- /dev/null
+++ b/storage/local-provisioner/templates/provisioner.yaml
@@ -0,0 +1,133 @@
+---
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.common.configMapName }}
+ namespace: {{ .Values.common.namespace }}
+data:
+{{- if .Values.daemonset.nodeLabels }}
+ nodeLabelsForPV: |
+ {{- range $label := .Values.daemonset.nodeLabels }}
+ - {{$label}}
+ {{- end }}
+{{- end }}
+{{- if .Values.common.useAlphaAPI }}
+ useAlphaAPI: "true"
+{{- end }}
+{{- if .Values.common.useJobForCleaning }}
+ useJobForCleaning: "yes"
+{{- end}}
+{{- if .Values.common.minResyncPeriod }}
+ minResyncPeriod: {{ .Values.common.minResyncPeriod | quote }}
+{{- end}}
+ storageClassMap: |
+ {{- range $classConfig := .Values.classes }}
+ {{ $classConfig.name }}:
+ hostDir: {{ $classConfig.hostDir }}
+ mountDir: {{ if $classConfig.mountDir }} {{- $classConfig.mountDir -}} {{ else }} {{- $classConfig.hostDir -}} {{ end }}
+ {{- if $classConfig.blockCleanerCommand }}
+ blockCleanerCommand:
+ {{- range $val := $classConfig.blockCleanerCommand }}
+ - "{{ $val -}}"{{- end}}
+ {{- end }}
+ {{- end }}
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: {{ .Values.daemonset.name }}
+ namespace: {{ .Values.common.namespace }}
+ labels:
+ app: local-volume-provisioner
+spec:
+ selector:
+ matchLabels:
+ app: local-volume-provisioner
+ template:
+ metadata:
+ labels:
+ app: local-volume-provisioner
+ spec:
+ serviceAccountName: {{.Values.daemonset.serviceAccount}}
+{{- if .Values.daemonset.tolerations }}
+ tolerations:
+{{ .Values.daemonset.tolerations | toYaml | trim | indent 8 }}
+{{- end }}
+ containers:
+ - image: "{{ .Values.daemonset.image }}"
+ {{- if .Values.daemonset.imagePullPolicy }}
+ imagePullPolicy: {{ .Values.daemonset.imagePullPolicy | quote }}
+ {{- end }}
+ name: provisioner
+ securityContext:
+ privileged: true
+{{- if .Values.daemonset.resources }}
+ resources:
+{{ .Values.daemonset.resources | toYaml | trim | indent 12 }}
+{{- end }}
+ env:
+ - name: MY_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: MY_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: JOB_CONTAINER_IMAGE
+ value: "{{ .Values.daemonset.image }}"
+ {{- if .Values.daemonset.kubeConfigEnv }}
+ - name: KUBECONFIG
+ value: {{.Values.daemonset.kubeConfigEnv}}
+ {{- end }}
+ volumeMounts:
+ - mountPath: /etc/provisioner/config
+ name: provisioner-config
+ readOnly: true
+ - mountPath: /dev
+ name: provisioner-dev
+ {{- range $classConfig := .Values.classes }}
+ - mountPath: {{ if $classConfig.mountDir }} {{- $classConfig.mountDir -}} {{ else }} {{- $classConfig.hostDir -}} {{ end }}
+ name: {{ $classConfig.name }}
+ mountPropagation: "HostToContainer"
+ {{- end }}
+ volumes:
+ - name: provisioner-config
+ configMap:
+ name: {{ .Values.common.configMapName }}
+ - name: provisioner-dev
+ hostPath:
+ path: /dev
+ {{- range $classConfig := .Values.classes }}
+ - name: {{ $classConfig.name }}
+ hostPath:
+ path: {{ $classConfig.hostDir }}
+ {{- end }}
+
+{{- range $val := .Values.classes }}
+{{- if $val.storageClass }}
+{{- $reclaimPolicy := $val.reclaimPolicy | default "Delete" }}
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: {{ $val.name }}
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: {{ $reclaimPolicy }}
+{{- end }}
+{{- end }}
diff --git a/storage/local-provisioner/values.yaml b/storage/local-provisioner/values.yaml
new file mode 100644
index 0000000..a7e2883
--- /dev/null
+++ b/storage/local-provisioner/values.yaml
@@ -0,0 +1,133 @@
+---
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Common options.
+common:
+ # Defines whether to generate service account and role bindings.
+ rbac: true
+
+ # Defines the namespace where provisioner runs
+ namespace: "local-storage"
+
+ # Beta PV.NodeAffinity field is used by default. If running against pre-1.10
+ # k8s version, the `useAlphaAPI` flag must be enabled in the configMap.
+ useAlphaAPI: false
+
+ # Provisioner clean volumes in process by default. If set to true, provisioner
+ # will use Jobs to clean.
+ useJobForCleaning: false
+
+ # Resync period in reflectors will be random between minResyncPeriod and
+ # 2*minResyncPeriod. Default: 5m0s.
+ minResyncPeriod: "5m0s"
+
+ # Defines the name of configmap used by Provisioner
+ configMapName: "local-provisioner-config"
+
+# Configure storage classes.
+classes:
+ - name: "local-hdd" # hard disk volumes
+
+ # Path on the host where local volumes of this storage class are mounted
+ # under.
+ hostDir: "/mnt/local-storage/hdd"
+
+ # Optionally specify mount path of local volumes. By default, we use same
+ # path as hostDir in container.
+ mountDir: false
+
+ blockCleanerCommand:
+ # Do a quick reset of the block device during its cleanup.
+ - "/scripts/quick_reset.sh"
+ # or use dd to zero out block dev in two iterations by uncommenting these lines
+ # - "/scripts/dd_zero.sh"
+ # - "2"
+ # or run shred utility for 2 iterations
+ # - "/scripts/shred.sh"
+ # - "2"
+ # or blkdiscard utility by uncommenting the line below.
+ # - "/scripts/blkdiscard.sh"
+
+ # Uncomment to create storage class object with default configuration.
+ storageClass: true
+
+  # Available reclaim policies: Delete/Retain, default: Delete.
+ reclaimPolicy: "Delete"
+
+ - name: "local-ssd" # solid state disk volumes
+
+ # Path on the host where local volumes of this storage class are mounted
+ # under.
+ hostDir: "/mnt/local-storage/ssd"
+
+ # Optionally specify mount path of local volumes. By default, we use same
+ # path as hostDir in container.
+ mountDir: false
+
+ blockCleanerCommand:
+ # Do a quick reset of the block device during its cleanup.
+ - "/scripts/quick_reset.sh"
+ # or use dd to zero out block dev in two iterations by uncommenting these lines
+ # - "/scripts/dd_zero.sh"
+ # - "2"
+ # or run shred utility for 2 iterations
+ # - "/scripts/shred.sh"
+ # - "2"
+ # or blkdiscard utility by uncommenting the line below.
+ # - "/scripts/blkdiscard.sh"
+
+ # Uncomment to create storage class object with default configuration.
+ storageClass: true
+
+ # Avaiable reclaim policies: Delete/Retain, defaults: Delete.
+  # Available reclaim policies: Delete/Retain, default: Delete.
+
+
+# Configure DaemonSet for provisioner.
+daemonset:
+
+ # Defines the name of a Provisioner
+ name: "local-volume-provisioner"
+
+ # Defines Provisioner's image name including container registry.
+ image: "quay.io/external_storage/local-volume-provisioner:v2.1.0"
+
+ # Defines Image download policy, see kubernetes documentation for available values.
+ imagePullPolicy: 'IfNotPresent'
+
+ # Defines a name of the service account which Provisioner will use to communicate with API server.
+ serviceAccount: "local-storage-admin"
+
+ # If configured KubeConfigEnv will (optionally) specify the location of kubeconfig file on the node.
+ # kubeConfigEnv: KUBECONFIG
+ kubeConfigEnv: false
+
+ # List of node labels to be copied to the PVs created by the provisioner in a format:
+ #
+ # nodeLabels:
+ # - failure-domain.beta.kubernetes.io/zone
+ # - failure-domain.beta.kubernetes.io/region
+ nodeLabels: []
+
+ # If configured, tolerations will add a toleration field to the DaemonSet PodSpec.
+ #
+ # Node tolerations for local-volume-provisioner scheduling to nodes with taints.
+ # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ tolerations: []
+
+ # If configured, resources will set the requests/limits field to the Daemonset PodSpec.
+ # Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+ resources: {}
+
diff --git a/local-persistent-volume/Chart.yaml b/storage/rook-cluster/Chart.yaml
similarity index 77%
copy from local-persistent-volume/Chart.yaml
copy to storage/rook-cluster/Chart.yaml
index bacf6f2..0c80d0f 100644
--- a/local-persistent-volume/Chart.yaml
+++ b/storage/rook-cluster/Chart.yaml
@@ -1,5 +1,5 @@
---
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,5 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-name: local-persistent-volume
-version: 1.0.0
+description: Sets up Rook for Ceph storage in testing environments
+name: rook-cluster
+version: 0.1.0-dev0
diff --git a/storage/rook-cluster/templates/00_rook_rbac.yaml b/storage/rook-cluster/templates/00_rook_rbac.yaml
new file mode 100644
index 0000000..9bb4fa6
--- /dev/null
+++ b/storage/rook-cluster/templates/00_rook_rbac.yaml
@@ -0,0 +1,74 @@
+---
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# For more information, see documentation:
+# https://rook.io/docs/rook/v0.8/rbac.html
+# https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/cluster.yaml
+
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Values.rookClusterNamespace }}
+
+---
+# Next 3 items: Allow the pods in this namespace to work with configmaps
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-cluster
+ namespace: {{ .Values.rookClusterNamespace }}
+
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-cluster
+ namespace: {{ .Values.rookClusterNamespace }}
+rules:
+- apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: [ "get", "list", "watch", "create", "update", "delete" ]
+
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-cluster
+ namespace: {{ .Values.rookClusterNamespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-cluster
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-cluster
+ namespace: {{ .Values.rookClusterNamespace }}
+
+---
+# Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: rook-ceph-cluster-mgmt
+ namespace: {{ .Values.rookClusterNamespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-cluster-mgmt
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: {{ .Values.rookSystemNamespace }}
+
diff --git a/storage/rook-cluster/templates/NOTES.txt b/storage/rook-cluster/templates/NOTES.txt
new file mode 100644
index 0000000..ede3f63
--- /dev/null
+++ b/storage/rook-cluster/templates/NOTES.txt
@@ -0,0 +1,11 @@
+This chart creates a Ceph cluster using Rook (similar to Rook's cluster.yaml)
+
+Ceph data is stored in this directory on all nodes: {{ .Values.cephDataDir }}
+
+The Ceph pool name is: {{ .Values.rookPoolName }}
+
+StorageClasses provided:
+
+ Rados Block Device (volumes): {{ .Values.rbdStorageClassName }}
+ Shared CephFS Filesystem: {{ .Values.fsStorageClassName }}, of size: {{ .Values.rookCephfsPersistentVolumeSize }}
+
diff --git a/storage/rook-cluster/templates/ceph_cluster.yaml b/storage/rook-cluster/templates/ceph_cluster.yaml
new file mode 100644
index 0000000..99d939a
--- /dev/null
+++ b/storage/rook-cluster/templates/ceph_cluster.yaml
@@ -0,0 +1,60 @@
+---
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# reference: https://rook.github.io/docs/rook/v0.8/ceph-cluster-crd.html
+# Modeled after "Storage Configuration: Cluster wide Directories"
+
+apiVersion: ceph.rook.io/v1beta1
+kind: Cluster
+metadata:
+ name: rook-ceph
+ namespace: {{ .Values.rookClusterNamespace }}
+spec:
+ dataDirHostPath: /var/lib/rook
+ serviceAccount: rook-ceph-cluster
+ # cluster level storage configuration and selection
+ storage:
+ useAllNodes: true
+ useAllDevices: false
+ config:
+ databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger)
+ journalSizeMB: "1024" # this value can be removed for environments with normal sized disks (20 GB or larger)
+ directories:
+ - path: {{ .Values.cephDataDir | quote }}
+
+---
+# reference: https://rook.io/docs/rook/v0.8/ceph-pool-crd.html
+
+apiVersion: ceph.rook.io/v1beta1
+kind: Pool
+metadata:
+ name: {{ .Values.rookPoolName }}
+ namespace: {{ .Values.rookClusterNamespace }}
+spec:
+ replicated:
+ size: 2
+
+---
+# reference: https://rook.io/docs/rook/v0.8/block.html
+
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: {{ .Values.rbdStorageClassName }}
+provisioner: ceph.rook.io/block
+parameters:
+ pool: {{ .Values.rookPoolName }}
+ clusterNamespace: {{ .Values.rookClusterNamespace }}
+
diff --git a/storage/rook-cluster/templates/ceph_fs.yaml b/storage/rook-cluster/templates/ceph_fs.yaml
new file mode 100644
index 0000000..e53d1d3
--- /dev/null
+++ b/storage/rook-cluster/templates/ceph_fs.yaml
@@ -0,0 +1,58 @@
+---
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# reference: https://rook.github.io/docs/rook/v0.8/filesystem.html
+
+apiVersion: ceph.rook.io/v1beta1
+kind: Filesystem
+metadata:
+ name: {{ .Values.rookCephfsName }}
+ namespace: {{ .Values.rookClusterNamespace }}
+spec:
+ metadataPool:
+ replicated:
+ size: 2
+ dataPools:
+ - replicated:
+ size: 2
+ metadataServer:
+ activeCount: 1
+ activeStandby: true
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: {{ .Values.fsStorageClassName }}
+provisioner: kubernetes.io/no-provisioner
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{ .Values.rookCephfsPersistentVolumeName }}
+spec:
+ storageClassName: {{ .Values.fsStorageClassName }}
+ capacity:
+ storage: {{ .Values.rookCephfsPersistentVolumeSize }}
+ accessModes:
+ - ReadWriteMany
+ flexVolume:
+ driver: ceph.rook.io/rook
+ fsType: ceph
+ options:
+ fsName: {{ .Values.rookCephfsName }}
+ clusterNamespace: {{ .Values.rookClusterNamespace }}
+
diff --git a/storage/rook-cluster/values.yaml b/storage/rook-cluster/values.yaml
new file mode 100644
index 0000000..04470a9
--- /dev/null
+++ b/storage/rook-cluster/values.yaml
@@ -0,0 +1,33 @@
+---
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# values file for rook-cluster
+
+# directory on k8s nodes that holds ceph data
+cephDataDir: "/mnt/ceph"
+
+rookSystemNamespace: "rook-ceph-system"
+rookClusterNamespace: "rook-ceph"
+
+rookPoolName: "cord-ceph-pool"
+rookCephfsName: "cord-ceph-filesystem"
+
+rookCephfsPersistentVolumeName: "cord-cephfs-pv"
+rookCephfsPersistentVolumeSize: 20Gi
+
+# StorageClass used by other charts to create PersistentVolumeClaims
+rbdStorageClassName: "cord-ceph-rbd"
+fsStorageClassName: "cord-cephfs"
+
diff --git a/local-persistent-volume/Chart.yaml b/storage/rook-operator/Chart.yaml
similarity index 78%
copy from local-persistent-volume/Chart.yaml
copy to storage/rook-operator/Chart.yaml
index bacf6f2..3f48df7 100644
--- a/local-persistent-volume/Chart.yaml
+++ b/storage/rook-operator/Chart.yaml
@@ -1,5 +1,5 @@
---
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,5 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-name: local-persistent-volume
-version: 1.0.0
+description: Sets up the Rook operator to provide storage
+name: rook-operator
+version: 0.1.0-dev0
+
diff --git a/local-persistent-volume/Chart.yaml b/storage/rook-operator/requirements.yaml
similarity index 78%
copy from local-persistent-volume/Chart.yaml
copy to storage/rook-operator/requirements.yaml
index bacf6f2..a16d9c4 100644
--- a/local-persistent-volume/Chart.yaml
+++ b/storage/rook-operator/requirements.yaml
@@ -1,5 +1,5 @@
---
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,5 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-name: local-persistent-volume
-version: 1.0.0
+dependencies:
+- name: rook-ceph
+ version: 0.8.1
+ repository: https://charts.rook.io/beta
diff --git a/storage/rook-operator/templates/NOTES.txt b/storage/rook-operator/templates/NOTES.txt
new file mode 100644
index 0000000..dba486d
--- /dev/null
+++ b/storage/rook-operator/templates/NOTES.txt
@@ -0,0 +1,4 @@
+This chart creates a Ceph provisioning operator with Rook (similar to Rook's operator.yaml)
+
+It must be run in the {{ .Values.rookSystemNamespace }} namespace
+
diff --git a/local-persistent-volume/Chart.yaml b/storage/rook-operator/values.yaml
similarity index 72%
copy from local-persistent-volume/Chart.yaml
copy to storage/rook-operator/values.yaml
index bacf6f2..55dc678 100644
--- a/local-persistent-volume/Chart.yaml
+++ b/storage/rook-operator/values.yaml
@@ -1,5 +1,5 @@
---
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,5 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-name: local-persistent-volume
-version: 1.0.0
+# values file for rook-operator
+
+rookSystemNamespace: "rook-ceph-system"
+
+rook-ceph:
+ agent:
+ flexVolumeDirPath: "/var/lib/kubelet/volume-plugins"
+
diff --git a/local-persistent-volume/Chart.yaml b/storage/rook-tools/Chart.yaml
similarity index 78%
copy from local-persistent-volume/Chart.yaml
copy to storage/rook-tools/Chart.yaml
index bacf6f2..335a61c 100644
--- a/local-persistent-volume/Chart.yaml
+++ b/storage/rook-tools/Chart.yaml
@@ -1,5 +1,5 @@
---
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,5 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-name: local-persistent-volume
-version: 1.0.0
+description: Toolbox for debugging Rook's Ceph storage
+name: rook-tools
+version: 0.1.0-dev0
+
diff --git a/storage/rook-tools/templates/NOTES.txt b/storage/rook-tools/templates/NOTES.txt
new file mode 100644
index 0000000..b08e84c
--- /dev/null
+++ b/storage/rook-tools/templates/NOTES.txt
@@ -0,0 +1,11 @@
+This chart loads the Rook toolbox container.
+
+Documentation:
+ https://rook.io/docs/rook/v0.8/toolbox.html
+
+Once loaded, run:
+
+kubectl -n {{ .Values.rookClusterNamespace }} exec -it rook-ceph-tools bash
+
+to access the tools
+
diff --git a/storage/rook-tools/templates/toolbox.yaml b/storage/rook-tools/templates/toolbox.yaml
new file mode 100644
index 0000000..be5db7b
--- /dev/null
+++ b/storage/rook-tools/templates/toolbox.yaml
@@ -0,0 +1,61 @@
+---
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Pod
+metadata:
+ name: rook-ceph-tools
+ namespace: {{ .Values.rookClusterNamespace }}
+spec:
+ dnsPolicy: ClusterFirstWithHostNet
+ containers:
+ - name: rook-ceph-tools
+ image: {{ .Values.rookCephToolsImage | quote }}
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ env:
+ - name: ROOK_ADMIN_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: rook-ceph-mon
+ key: admin-secret
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /dev
+ name: dev
+ - mountPath: /sys/bus
+ name: sysbus
+ - mountPath: /lib/modules
+ name: libmodules
+ - name: mon-endpoint-volume
+ mountPath: /etc/rook
+ hostNetwork: false
+ volumes:
+ - name: dev
+ hostPath:
+ path: /dev
+ - name: sysbus
+ hostPath:
+ path: /sys/bus
+ - name: libmodules
+ hostPath:
+ path: /lib/modules
+ - name: mon-endpoint-volume
+ configMap:
+ name: rook-ceph-mon-endpoints
+ items:
+ - key: data
+ path: mon-endpoints
+
diff --git a/local-persistent-volume/Chart.yaml b/storage/rook-tools/values.yaml
similarity index 72%
copy from local-persistent-volume/Chart.yaml
copy to storage/rook-tools/values.yaml
index bacf6f2..468e300 100644
--- a/local-persistent-volume/Chart.yaml
+++ b/storage/rook-tools/values.yaml
@@ -1,5 +1,5 @@
---
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,5 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-name: local-persistent-volume
-version: 1.0.0
+# values file for rook-tools
+
+rookClusterNamespace: 'rook-ceph'
+
+rookCephToolsImage: 'rook/ceph-toolbox:v0.8.1'
+
+imagePullPolicy: 'IfNotPresent'
+
diff --git a/xos-db/values.yaml b/xos-db/values.yaml
index 35e2ca7..8bb026d 100644
--- a/xos-db/values.yaml
+++ b/xos-db/values.yaml
@@ -25,6 +25,5 @@
# DB persistence related vars
needDBPersistence: false
-storageClassName: cord-block
pvClaimName: xosdb-pv-claim
-
+storageClassName: ""