CORD-2965 configmap support
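
Add ConfigMap and Secret support to the Kubernetes synchronizer:

- New models: KubernetesData, a named, TrustDomain-scoped set of key/value
  pairs encoded as a JSON dictionary, with KubernetesConfigMap and
  KubernetesSecret subclasses, plus KubernetesConfigVolumeMount and
  KubernetesSecretVolumeMount to attach them to a KubernetesServiceInstance
  at a given mount_path/sub_path.
- New sync steps sync_configmap.py and sync_secret.py create or patch the
  corresponding ConfigMap/Secret objects in Kubernetes.
- sync_kubernetesserviceinstance.py factors pod construction into
  generate_pod_spec(), which wires the configured volumes and volume mounts
  into the pod spec; existing pods are now replaced so updated mounts are
  picked up.
- pull_pods.py updates pod_ip for XOS-managed pods once the IP becomes
  available.

A minimal sketch of how the new models might be used, e.g. from xossh
(assumes an existing trust_domain and service_instance; names and values
are illustrative only):

    import json

    # Hypothetical example: define a ConfigMap and mount it into a pod.
    cm = KubernetesConfigMap(name="my-config",
                             trust_domain=trust_domain,
                             data=json.dumps({"foo": "bar"}))
    cm.save()

    mount = KubernetesConfigVolumeMount(config=cm,
                                        service_instance=service_instance,
                                        mount_path="/etc/my-config")
    mount.save()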

Change-Id: I4c6bfe970dc7b466ac3d370e0e056bf0e67dfa1c
diff --git a/xos/synchronizer/models/kubernetes.xproto b/xos/synchronizer/models/kubernetes.xproto
index 8c2fb2c..40229f7 100644
--- a/xos/synchronizer/models/kubernetes.xproto
+++ b/xos/synchronizer/models/kubernetes.xproto
@@ -8,5 +8,32 @@
 
 message KubernetesServiceInstance (ComputeServiceInstance){
      option verbose_name = "Kubernetes Service Instance";
-     optional string pod_ip = 1 [max_length=32, db_index = False, null = True, blank = True];
+     optional string pod_ip = 1 [max_length=32, db_index = False, null = True, blank = True, help_text = "IP address of pod"];
 }
+
+message KubernetesData (XOSBase) {
+    optional string name = 1 [max_length=256, db_index = False, null = False, blank = False, help_text = "Name of this data store" ];
+    optional string data = 2 [db_index = False, null = True, blank = True, help_text = "Set of key/value pairs encoded as a JSON dictionary"];
+    required manytoone trust_domain->TrustDomain:kubernetes_configmaps = 3 [db_index = True, null = False, blank = False, help_text = "Trust domain this data resides in"];
+}
+
+message KubernetesConfigMap (KubernetesData) {
+}
+
+message KubernetesSecret (KubernetesData) {
+}
+
+message KubernetesSecretVolumeMount (XOSBase) {
+    required manytoone secret->KubernetesSecret:kubernetes_secret_volume_mounts = 1 [db_index = True, null = False, blank = False, help_text = "Secret to mount"];
+    required manytoone service_instance->KubernetesServiceInstance:kubernetes_secret_volume_mounts = 2 [db_index = True, null = False, blank = False, help_text = "Service instance in which to mount secret"];
+    optional string mount_path = 3 [max_length=256, db_index = False, null = False, blank = False, help_text = "Path to mount secret"];
+    optional string sub_path = 4 [max_length=1024, db_index = False, null = True, blank = True, help_text = "Subpath within secret to mount"];
+}
+
+message KubernetesConfigVolumeMount (XOSBase) {
+    required manytoone config->KubernetesConfigMap:kubernetes_config_volume_mounts = 1 [db_index = True, null = False, blank = False, help_text = "Config to mount"];
+    required manytoone service_instance->KubernetesServiceInstance:kubernetes_config_volume_mounts = 2 [db_index = True, null = False, blank = False, help_text = "Service instance in which to mount config"];
+    optional string mount_path = 3 [max_length=1024, db_index = False, null = False, blank = False, help_text = "Path to mount config"];
+    optional string sub_path = 4 [max_length=1024, db_index = False, null = True, blank = True, help_text = "Subpath within config to mount"];
+}
+
diff --git a/xos/synchronizer/pull_steps/pull_pods.py b/xos/synchronizer/pull_steps/pull_pods.py
index 8d5d544..519e587 100644
--- a/xos/synchronizer/pull_steps/pull_pods.py
+++ b/xos/synchronizer/pull_steps/pull_pods.py
@@ -227,8 +227,17 @@
                                                     backend_handle = self.obj_to_handle(pod),
                                                     xos_managed = False)
                 xos_pod.save()
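+                # Remember the newly created pod so the IP check below also applies to it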
+                xos_pods_by_name[k] = xos_pod
                 log.info("Created XOS POD %s" % xos_pod.name)
 
+            # Check whether the IP address has changed. This can happen for pods that are managed by
+            # XOS: the IP isn't available immediately when XOS creates a pod, but shows up a bit later,
+            # so handle that case here.
+            xos_pod = xos_pods_by_name[k]
+            if (pod.status.pod_ip is not None) and (xos_pod.pod_ip != pod.status.pod_ip):
+                xos_pod.pod_ip = pod.status.pod_ip
+                xos_pod.save(update_fields = ["pod_ip"])
+
         # For each xos pod, see if there is no k8s pod. If that's the case, then the pod must have been deleted.
         for (k,xos_pod) in xos_pods_by_name.items():
             if (not k in k8s_pods_by_name):
diff --git a/xos/synchronizer/steps/sync_configmap.py b/xos/synchronizer/steps/sync_configmap.py
new file mode 100644
index 0000000..e69a392
--- /dev/null
+++ b/xos/synchronizer/steps/sync_configmap.py
@@ -0,0 +1,82 @@
+
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+    sync_configmap.py
+
+    Synchronize Config Maps.
+"""
+
+import json
+from synchronizers.new_base.syncstep import SyncStep
+from synchronizers.new_base.modelaccessor import KubernetesConfigMap
+
+from xosconfig import Config
+from multistructlog import create_logger
+
+from kubernetes.client.rest import ApiException
+from kubernetes import client as kubernetes_client, config as kubernetes_config
+
+log = create_logger(Config().get('logging'))
+
+class SyncKubernetesConfigMap(SyncStep):
+
+    """
+        SyncKubernetesConfigMap
+
+        Implements sync step for syncing ConfigMaps.
+    """
+
+    provides = [KubernetesConfigMap]
+    observes = KubernetesConfigMap
+    requested_interval = 0
+
+    def __init__(self, *args, **kwargs):
+        super(SyncKubernetesConfigMap, self).__init__(*args, **kwargs)
+        kubernetes_config.load_incluster_config()
+        self.v1 = kubernetes_client.CoreV1Api()
+
+    def get_config_map(self, o):
+        """ Given an XOS KubernetesConfigMap object, read the corresponding ConfigMap from Kubernetes.
+            Return None if no ConfigMap exists.
+        """
+        try:
+            config_map = self.v1.read_namespaced_config_map(o.name, o.trust_domain.name)
+        except ApiException as e:
+            if e.status == 404:
+                return None
+            raise
+        return config_map
+
+    def sync_record(self, o):
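+        # Create the ConfigMap if it doesn't exist, otherwise patch it. o.data is a JSON-encoded
+        # dictionary; decode it before handing it to the Kubernetes client.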
+        config_map = self.get_config_map(o)
+        if not config_map:
+            config_map = kubernetes_client.V1ConfigMap()
+            config_map.data = json.loads(o.data)
+            config_map.metadata = kubernetes_client.V1ObjectMeta(name=o.name)
+
+            config_map = self.v1.create_namespaced_config_map(o.trust_domain.name, config_map)
+        else:
+            config_map.data = json.loads(o.data)
+            self.v1.patch_namespaced_config_map(o.name, o.trust_domain.name, config_map)
+
+        if (not o.backend_handle):
+            o.backend_handle = config_map.metadata.self_link
+            o.save(update_fields=["backend_handle"])
+
+    def delete_record(self, o):
+        # TODO(smbaker): Implement delete step
+        pass
+
diff --git a/xos/synchronizer/steps/sync_kubernetesserviceinstance.py b/xos/synchronizer/steps/sync_kubernetesserviceinstance.py
index 27badfa..0980896 100644
--- a/xos/synchronizer/steps/sync_kubernetesserviceinstance.py
+++ b/xos/synchronizer/steps/sync_kubernetesserviceinstance.py
@@ -63,41 +63,81 @@
             raise
         return pod
 
+    def generate_pod_spec(self, o):
+        pod = kubernetes_client.V1Pod()
+        pod.metadata = kubernetes_client.V1ObjectMeta(name=o.name)
+
+        if o.slice.trust_domain:
+            pod.metadata.namespace = o.slice.trust_domain.name
+
+        if o.image.tag:
+            imageName = o.image.name + ":" + o.image.tag
+        else:
+            # TODO(smbaker): Is this case possible?
+            imageName = o.image.name
+
+        volumes = []
+        volume_mounts = []
+
+        # Attach and mount the configmaps
+        for xos_vol in o.kubernetes_config_volume_mounts.all():
+            k8s_vol = kubernetes_client.V1Volume(name=xos_vol.config.name)
+            k8s_vol.config_map = kubernetes_client.V1ConfigMapVolumeSource(name=xos_vol.config.name)
+            volumes.append(k8s_vol)
+
+            k8s_vol_m = kubernetes_client.V1VolumeMount(name=xos_vol.config.name,
+                                                        mount_path=xos_vol.mount_path,
+                                                        sub_path=xos_vol.sub_path)
+            volume_mounts.append(k8s_vol_m)
+
+        # Attach and mount the secrets
+        for xos_vol in o.kubernetes_secret_volume_mounts.all():
+            k8s_vol = kubernetes_client.V1Volume(name=xos_vol.secret.name)
+            k8s_vol.secret = kubernetes_client.V1SecretVolumeSource(secret_name=xos_vol.secret.name)
+            volumes.append(k8s_vol)
+
+            k8s_vol_m = kubernetes_client.V1VolumeMount(name=xos_vol.secret.name,
+                                                        mount_path=xos_vol.mount_path,
+                                                        sub_path=xos_vol.sub_path)
+            volume_mounts.append(k8s_vol_m)
+
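+        # Build the pod's single container, attaching the volume mounts collected above.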
+        container = kubernetes_client.V1Container(name=o.name,
+                                                  image=imageName,
+                                                  volume_mounts=volume_mounts)
+
+        spec = kubernetes_client.V1PodSpec(containers=[container], volumes=volumes)
+        pod.spec = spec
+
+        if o.slice.principal:
+            pod.spec.service_account = o.slice.principal.name
+
+        return pod
+
     def sync_record(self, o):
         if o.xos_managed:
             if (not o.slice) or (not o.slice.trust_domain):
                 raise Exception("No trust domain for service instance", o=o)
 
             if (not o.name):
-                raise Exception("No name for service instance", o=o)
+                raise Exception("No name for service instance")
 
             pod = self.get_pod(o)
             if not pod:
-                # make a pod!
-                pod = kubernetes_client.V1Pod()
-                pod.metadata = kubernetes_client.V1ObjectMeta(name=o.name)
-
-                if o.slice.trust_domain:
-                    pod.metadata.namespace = o.slice.trust_domain.name
-
-                if o.image.tag:
-                    imageName = o.image.name + ":" + o.image.tag
-                else:
-                    # TODO(smbaker): Is this case possible?
-                    imageName = o.image.name
-
-                container=kubernetes_client.V1Container(name=o.name,
-                                                        image=imageName)
-
-                spec = kubernetes_client.V1PodSpec(containers=[container])
-                pod.spec = spec
-
-                if o.slice.principal:
-                    pod.spec.service_account = o.slice.principal.name
+                pod = self.generate_pod_spec(o)
 
                 log.info("Creating pod", o=o, pod=pod)
 
                 pod = self.v1.create_namespaced_pod(o.slice.trust_domain.name, pod)
+            else:
+                log.info("Replacing pod", o=o, pod=pod)
+
+                # TODO: apply changes, perhaps by calling self.generate_pod_spec() and copying in the differences,
+                # to accommodate new volumes that might have been attached, or other changes.
+
+                # Even if we apply no changes to the pod, Kubernetes will still pull in the updated contents
+                # of existing configmap mounts during the replace operation if the configmap contents have changed.
+
+                pod = self.v1.replace_namespaced_pod(o.name, o.slice.trust_domain.name, pod)
 
             if (not o.backend_handle):
                 o.backend_handle = pod.metadata.self_link
diff --git a/xos/synchronizer/steps/sync_secret.py b/xos/synchronizer/steps/sync_secret.py
new file mode 100644
index 0000000..903be4f
--- /dev/null
+++ b/xos/synchronizer/steps/sync_secret.py
@@ -0,0 +1,82 @@
+
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+    sync_secret.py
+
+    Synchronize Secrets.
+"""
+
+import json
+from synchronizers.new_base.syncstep import SyncStep
+from synchronizers.new_base.modelaccessor import KubernetesSecret
+
+from xosconfig import Config
+from multistructlog import create_logger
+
+from kubernetes.client.rest import ApiException
+from kubernetes import client as kubernetes_client, config as kubernetes_config
+
+log = create_logger(Config().get('logging'))
+
+class SyncKubernetesSecret(SyncStep):
+
+    """
+        SyncKubernetesSecret
+
+        Implements sync step for syncing Secrets.
+    """
+
+    provides = [KubernetesSecret]
+    observes = KubernetesSecret
+    requested_interval = 0
+
+    def __init__(self, *args, **kwargs):
+        super(SyncKubernetesSecret, self).__init__(*args, **kwargs)
+        kubernetes_config.load_incluster_config()
+        self.v1 = kubernetes_client.CoreV1Api()
+
+    def get_secret(self, o):
+        """ Given an XOS KubernetesSecret object, read the corresponding Secret from Kubernetes.
+            Return None if no Secret exists.
+        """
+        try:
+            secret = self.v1.read_namespaced_secret(o.name, o.trust_domain.name)
+        except ApiException as e:
+            if e.status == 404:
+                return None
+            raise
+        return secret
+
+    def sync_record(self, o):
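+        # Create the Secret if it doesn't exist, otherwise patch it. o.data is a JSON-encoded
+        # dictionary; decode it before handing it to the Kubernetes client.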
+        secret = self.get_secret(o)
+        if not secret:
+            secret = kubernetes_client.V1Secret()
+            secret.data = json.loads(o.data)
+            secret.metadata = kubernetes_client.V1ObjectMeta(name=o.name)
+
+            secret = self.v1.create_namespaced_secret(o.trust_domain.name, secret)
+        else:
+            secret.data = json.loads(o.data)
+            self.v1.patch_namespaced_secret(o.name, o.trust_domain.name, secret)
+
+        if (not o.backend_handle):
+            o.backend_handle = secret.metadata.self_link
+            o.save(update_fields=["backend_handle"])
+
+    def delete_record(self, o):
+        # TODO(smbaker): Implement delete step
+        pass
+