"""sync_container.py

Synchronizer step that creates and tears down Docker containers for
Instances whose isolation is "container" (on bare metal) or "container_vm"
(inside a VM), by running the sync_container.yaml / teardown_container.yaml
Ansible playbooks.
"""

import hashlib
import os
import socket
import sys
import base64
import time
from synchronizers.new_base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
from synchronizers.new_base.syncstep import DeferredException
from synchronizers.new_base.ansible_helper import run_template_ssh
from xos.logger import Logger, logging
from synchronizers.new_base.modelaccessor import *

# hpclibrary will be in steps/..
parentdir = os.path.join(os.path.dirname(__file__), "..")
sys.path.insert(0, parentdir)

logger = Logger(level=logging.INFO)

class SyncContainer(SyncInstanceUsingAnsible):
    provides = [Instance]
    observes = Instance
    requested_interval = 0
    template_name = "sync_container.yaml"

    def __init__(self, *args, **kwargs):
        super(SyncContainer, self).__init__(*args, **kwargs)

    def fetch_pending(self, deletion=False):
        objs = super(SyncContainer, self).fetch_pending(deletion)
        # only handle Instances that use container-based isolation
        objs = [x for x in objs if x.isolation in ["container", "container_vm"]]
        return objs

    def get_instance_port(self, container_port):
        # find a port on the same network that belongs to a VM instance on the
        # same node and already has a MAC; it is used as the "snoop" instance
        # for containers on bare metal
        for p in container_port.network.links.all():
            if (p.instance and p.instance.isolation == "vm"
                    and p.instance.node.id == container_port.instance.node.id
                    and p.mac):
                return p
        return None

    def get_parent_port_mac(self, instance, port):
        # for a container-in-VM, find the parent VM's port on the same network
        if not instance.parent:
            raise Exception("instance has no parent")
        for parent_port in instance.parent.ports.all():
            if parent_port.network == port.network:
                if not parent_port.mac:
                    raise DeferredException("parent port on network %s does not have mac yet" % parent_port.network.name)
                return parent_port.mac
        raise Exception("failed to find corresponding parent port for network %s" % port.network.name)

    def get_ports(self, o):
        ports = []
        if o.slice.network in ["host", "bridged"]:
            pass  # no ports in host or bridged mode
        else:
            for port in o.ports.all():
                if not port.ip:
                    # 'unmanaged' ports may have an ip, but no mac
                    # XXX: are there any ports that have a mac but no ip?
                    raise DeferredException("Port on network %s is not yet ready" % port.network.name)

                pd = {}
                pd["mac"] = port.mac or ""
                pd["ip"] = port.ip or ""
                pd["xos_network_id"] = port.network.id

                if port.network.name == "wan_network":
                    if port.ip:
                        # derive a MAC from the IP: locally-administered prefix
                        # 02:42 followed by the four octets of the address
                        (a, b, c, d) = port.ip.split('.')
                        pd["mac"] = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))

                if o.isolation == "container":
                    # container on bare metal
                    instance_port = self.get_instance_port(port)
                    if not instance_port:
                        raise DeferredException("No instance on slice for port on network %s" % port.network.name)

                    pd["snoop_instance_mac"] = instance_port.mac
                    pd["snoop_instance_id"] = instance_port.instance.instance_id
                    pd["src_device"] = ""
                    pd["bridge"] = "br-int"
                else:
                    # container in VM
                    pd["snoop_instance_mac"] = ""
                    pd["snoop_instance_id"] = ""
                    pd["parent_mac"] = self.get_parent_port_mac(o, port)
                    pd["bridge"] = ""

                for (k, v) in port.get_parameters().items():
                    pd[k] = v

                ports.append(pd)

        # for any ports that don't have a device, assign one
        used_ports = [x["device"] for x in ports if ("device" in x)]
        avail_ports = ["eth%d" % i for i in range(0, 64) if ("eth%d" % i not in used_ports)]
        for port in ports:
            if not port.get("device", None):
                port["device"] = avail_ports.pop(0)

        return ports
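
    # Illustrative only (not part of the original synchronizer): for a container
    # on bare metal, get_ports() above produces one dictionary per port shaped
    # roughly like the sketch below. The field names come from the code; the
    # values are made up.
    #
    #   {
    #       "mac": "fa:16:3e:00:00:01",
    #       "ip": "10.0.0.5",
    #       "xos_network_id": 7,
    #       "snoop_instance_mac": "fa:16:3e:00:00:02",
    #       "snoop_instance_id": "instance-00000042",
    #       "src_device": "",
    #       "bridge": "br-int",
    #       "device": "eth0",
    #   }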

    def get_extra_attributes(self, o):
        fields = {}
        fields["ansible_tag"] = "container-%s" % str(o.id)
        if o.image.tag:
            fields["docker_image"] = o.image.path + ":" + o.image.tag
        else:
            fields["docker_image"] = o.image.path
        fields["ports"] = self.get_ports(o)
        if o.volumes:
            fields["volumes"] = [x.strip() for x in o.volumes.split(",")]
        else:
            fields["volumes"] = []
        fields["network_method"] = o.slice.network or "default"
        return fields

    def sync_record(self, o):
        logger.info("sync'ing object %s" % str(o), extra=o.tologdict())

        fields = self.get_ansible_fields(o)

        # If 'o' defines a 'sync_attributes' list, then we'll copy those
        # attributes into the Ansible recipe's field list automatically.
        if hasattr(o, "sync_attributes"):
            for attribute_name in o.sync_attributes:
                fields[attribute_name] = getattr(o, attribute_name)

        fields.update(self.get_extra_attributes(o))

        self.run_playbook(o, fields)

        o.instance_id = fields["container_name"]
        o.instance_name = fields["container_name"]

        o.save()

    def delete_record(self, o):
        logger.info("deleting object %s" % str(o), extra=o.tologdict())

        fields = self.get_ansible_fields(o)

        # If 'o' defines a 'sync_attributes' list, then we'll copy those
        # attributes into the Ansible recipe's field list automatically.
        if hasattr(o, "sync_attributes"):
            for attribute_name in o.sync_attributes:
                fields[attribute_name] = getattr(o, attribute_name)

        fields.update(self.get_extra_attributes(o))

        self.run_playbook(o, fields, "teardown_container.yaml")

    def run_playbook(self, o, fields, template_name=None):
        if not template_name:
            template_name = self.template_name
        tStart = time.time()
        run_template_ssh(template_name, fields, path="container", object=o)
        logger.info("playbook execution time %d" % int(time.time() - tStart), extra=o.tologdict())