Add ability to start containers and monitor their status
Change-Id: I6cbfcf1046c3d9a452d4177564a6d6c1f3773b2d
diff --git a/common/utils/dockerhelpers.py b/common/utils/dockerhelpers.py
index 0b24dd8..6db4ebe 100644
--- a/common/utils/dockerhelpers.py
+++ b/common/utils/dockerhelpers.py
@@ -24,6 +24,7 @@
from docker import Client
+docker_socket = os.environ.get('DOCKER_SOCK', 'unix://tmp/docker.sock')
log = get_logger()
@@ -38,7 +39,7 @@
my_container_id = os.environ.get('HOSTNAME', None)
try:
- docker_cli = Client(base_url='unix://tmp/docker.sock')
+ docker_cli = Client(base_url=docker_socket)
info = docker_cli.inspect_container(my_container_id)
except Exception, e:
@@ -49,6 +50,20 @@
return name
+def create_host_config(volumes, ports):
+ try:
+ port_bindings = { ports[0] : None }
+ binds = ['{0}:{1}'.format(k, v) for k, v in volumes.iteritems()]
+ docker_cli = Client(base_url=docker_socket)
+ host_config = docker_cli.create_host_config(binds=binds,
+ port_bindings=port_bindings)
+ except Exception, e:
+ log.exception('failed host config creation', volumes, ports, e=e)
+ raise
+
+ return host_config
+
+
def create_container_network(name, links):
"""
Creates a container networks based on a set of containers.
@@ -57,10 +72,10 @@
:return: a network configuration
"""
try:
- docker_cli = Client(base_url='unix://tmp/docker.sock')
+ docker_cli = Client(base_url=docker_socket)
docker_cli.create_network(name)
networking_config = docker_cli.create_networking_config({
- 'network1': docker_cli.create_endpoint_config(links = links)
+ name : docker_cli.create_endpoint_config(links=links)
})
except Exception, e:
log.exception('failed network creation', name, e=e)
@@ -77,9 +92,11 @@
:return: the containers name
"""
try:
- docker_cli = Client(base_url='unix://tmp/docker.sock')
- docker_cli.create_container(**args)
+ docker_cli = Client(base_url=docker_socket)
+ container = docker_cli.create_container(**args)
+ response = docker_cli.start(container=container.get('Id'))
except Exception, e:
log.exception('failed', e=e)
raise
+ return response
diff --git a/podder/consul_mgr.py b/podder/consul_mgr.py
index c62ca58..9187093 100644
--- a/podder/consul_mgr.py
+++ b/podder/consul_mgr.py
@@ -1,13 +1,13 @@
-import consul
+from consul.twisted import Consul
from structlog import get_logger
from twisted.internet import reactor
-from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
+from twisted.internet.defer import returnValue
-from common.utils.dockerhelpers import create_container_network, start_container
+from common.utils.dockerhelpers import create_container_network, start_container, create_host_config
containers = {
'chameleon' : {
- 'image' : 'opencord/chameleon',
+ 'image' : 'cord/chameleon',
'command' : [ "/chameleon/main.py",
"-v",
"--consul=consul:8500",
@@ -18,9 +18,9 @@
"-v"],
'ports' : [ 8881 ],
'depends_on' : [ "consul", "voltha" ],
- 'links' : [ "consul", "fluentd" ],
+ 'links' : { "consul" : "consul", "fluentd" : "fluentd" },
'environment' : ["SERVICE_8881_NAME=chamleon-rest"],
- 'volumes' : '/var/run/docker.sock:/tmp/docker.sock'
+ 'volumes' : { '/var/run/docker.sock' : '/tmp/docker.sock' }
}
}
@@ -33,9 +33,8 @@
self.running = False
self.index = 0
(host, port) = arg.split(':')
- self.conn = consul.Consul(host=host, port=port)
+ self.conn = Consul(host=host, port=port)
- @inlineCallbacks
def run(self):
if self.running:
return
@@ -43,36 +42,50 @@
self.log.info('Running consul manager')
- reactor.callLater(0, self.provision_voltha_instances())
+ reactor.callLater(0, self.provision_voltha_instances, None)
reactor.addSystemEventTrigger('before', 'shutdown', self.shutdown)
returnValue(self)
- @inlineCallbacks
+
def shutdown(self):
self.log.info('Shutting down consul manager')
self.running = False
- @inlineCallbacks
- def provision_voltha_instances(self):
- while True:
- if not self.running:
- return
- # maintain index such that callbacks only happen is something has changed
- # timeout is default to 5m
- (self.index, data) = self.conn.catalog.service(service='voltha-grpc',
- index=self.index)
- self.start_containers(data)
- def start_containers(self, data):
+ def provision_voltha_instances(self, _):
+ if not self.running:
+ return
+        # maintain index such that callbacks only happen if something has changed
+ # timeout is default to 5m
+ deferred = self.conn.catalog.service(wait='5s', service='voltha-grpc',
+ index=self.index)
+ deferred.addCallbacks(self.provision, self.fail)
+ deferred.addBoth(self.provision_voltha_instances)
+
+ def provision(self, result):
+ (self.index, data) = result
+ self.log.info('PROVISIONING {}'.format(data))
for item in data:
- serviceId = item['ServiceID'].split(':')[1].split('_')[2]
- serviceTags = item['ServiceTags']
- self.log.info('voltha instance %s, with tags %s' % (serviceId, serviceTags))
- for tag in serviceTags:
- if tag in containers:
- netcfg = self.create_network(serviceId, tag)
- self.create_container(serviceId, tag, netcfg)
+ (service, id) = item['ServiceID'].split(':')[1].split('_')[1:3]
+ self.log.info('got {} {}'.format(service, id))
+ self.podProvisioned(service, id, item['ServiceTags'])
+
+ def fail(self, err):
+        self.log.info('Failure {}'.format(err))
+
+ def start_containers(self, result, service, id, tags):
+ self.log.info("wtf : {}".format(result))
+ (_, done) = result
+ self.log.info("result : {}, {}".format(done, type(done)))
+ if done:
+ return
+ self.log.info('provisioning voltha instance {}, with tags {}'.format(id, tags))
+ for tag in tags:
+ if tag in containers:
+ netcfg = self.create_network(id, tag)
+ if self.create_container(id, tag, netcfg):
+ self.markProvisioned(service, id)
def create_network(self, id, tag):
@@ -81,10 +94,24 @@
def create_container(self, id, tag, netcfg):
args = {}
- args['image'] = containers['image']
+ args['image'] = containers[tag]['image']
args['networking_config'] = netcfg
- args['command'] = containers['command']
- args['ports'] = containers['ports']
- args['environment'] = containers['environment']
- args['volumes'] = containers['volumes']
- start_container(args)
\ No newline at end of file
+ args['command'] = containers[tag]['command']
+ args['ports'] = containers[tag]['ports']
+ args['environment'] = containers[tag]['environment']
+ args['volumes'] = containers[tag]['volumes'].keys()
+ args['host_config'] = create_host_config(containers[tag]['volumes'],
+ containers[tag]['ports'])
+ args['name'] = 'podder_%s_%s' % (tag, id)
+ start_container(args)
+ #TODO check container is running
+
+ return True
+
+ def podProvisioned(self, service, id, tags):
+ d = self.conn.kv.get('podder/%s/%s/state' % (service, id))
+ d.addCallback(self.start_containers, service, id, tags)
+ d.addErrback(lambda err: self.log.info("FAIL {}".format(err)))
+
+ def markProvisioned(self, service, id):
+ self.conn.kv.put('podder/%s/%s/state' % (service, id), "started")