Merge branch 'release/1.1.0'
diff --git a/.gitignore b/.gitignore
index cc49c01..451dd79 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,4 @@
xos/core/xoslib/docs
xos/core/xoslib/coverage
node_modules
+xos/xos_configuration/*
diff --git a/xos/configurations/cord/ceilometer.yaml b/xos/configurations/cord/ceilometer.yaml
index b64fbef..82e697a 100644
--- a/xos/configurations/cord/ceilometer.yaml
+++ b/xos/configurations/cord/ceilometer.yaml
@@ -51,6 +51,9 @@
mysite:
type: tosca.nodes.Site
+ trusty-server-multi-nic:
+ type: tosca.nodes.Image
+
mysite_ceilometer:
description: Ceilometer Proxy Slice
type: tosca.nodes.Slice
@@ -61,6 +64,11 @@
- site:
node: mysite
relationship: tosca.relationships.MemberOfSite
+ - default_image:
+ node: trusty-server-multi-nic
+ relationship: tosca.relationships.DefaultImage
+ properties:
+ default_flavor: m1.small
my_ceilometer_tenant:
description: Ceilometer Service default Tenant
@@ -72,9 +80,8 @@
Ceilometer:
type: tosca.nodes.DashboardView
- artifacts:
- ceilometer_url: template:xosCeilometerDashboard
-
+ properties:
+ url: template:xosCeilometerDashboard
Tenant:
type: tosca.nodes.DashboardView
properties:
diff --git a/xos/core/admin.py b/xos/core/admin.py
index 03f8c83..97e76ff 100644
--- a/xos/core/admin.py
+++ b/xos/core/admin.py
@@ -1054,7 +1054,7 @@
class SliceAdmin(XOSBaseAdmin):
form = SliceForm
- fieldList = ['backend_status_text', 'site', 'name', 'serviceClass', 'enabled','description', 'service', 'slice_url', 'max_instances', "default_isolation"]
+ fieldList = ['backend_status_text', 'site', 'name', 'serviceClass', 'enabled','description', 'service', 'slice_url', 'max_instances', "default_isolation", "network"]
fieldsets = [('Slice Details', {'fields': fieldList, 'classes':['suit-tab suit-tab-general']}),]
readonly_fields = ('backend_status_text', )
list_display = ('backend_status_icon', 'name', 'site','serviceClass', 'slice_url', 'max_instances')
diff --git a/xos/core/models/slice.py b/xos/core/models/slice.py
index df36b26..84622cf 100644
--- a/xos/core/models/slice.py
+++ b/xos/core/models/slice.py
@@ -20,6 +20,7 @@
class Slice(PlCoreBase):
ISOLATION_CHOICES = (('vm', 'Virtual Machine'), ('container', 'Container'), ('container_vm', 'Container In VM'))
+ NETWORK_CHOICES = ((None, 'Default'), ('host', 'Host'), ('bridged', 'Bridged'))
name = StrippedCharField(unique=True, help_text="The Name of the Slice", max_length=80)
enabled = models.BooleanField(default=True, help_text="Status for this Slice")
@@ -29,7 +30,7 @@
site = models.ForeignKey(Site, related_name='slices', help_text="The Site this Slice belongs to")
max_instances = models.IntegerField(default=10)
service = models.ForeignKey(Service, related_name='slices', null=True, blank=True)
- network = StrippedCharField(default="Private Only",null=True, blank=True, max_length=256)
+ network = models.CharField(null=True, blank=True, max_length=256, choices=NETWORK_CHOICES)
tags = generic.GenericRelation(Tag)
serviceClass = models.ForeignKey(ServiceClass, related_name = "slices", null=True, default=get_default_serviceclass)
creator = models.ForeignKey(User, related_name='slices', blank=True, null=True)
@@ -83,6 +84,11 @@
if not self.creator:
raise ValidationError('slice has no creator')
+ if self.network=="Private Only":
+ # "Private Only" was the default from the old Tenant View
+ self.network=None
+ self.enforce_choices(self.network, self.NETWORK_CHOICES)
+
super(Slice, self).save(*args, **kwds)
def can_update(self, user):
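
For reference, a minimal sketch of the choice enforcement the new save() logic above relies on; the real `enforce_choices()` helper lives elsewhere in XOS core, so this version is illustrative only.

```python
# Illustrative only: XOS core supplies the real enforce_choices(); this sketch
# just shows the semantics Slice.save() relies on for NETWORK_CHOICES.
from django.core.exceptions import ValidationError

NETWORK_CHOICES = ((None, 'Default'), ('host', 'Host'), ('bridged', 'Bridged'))

def enforce_choices(value, choices):
    # Accept any stored key from the choices tuple, including None ("Default").
    if value not in [stored for (stored, label) in choices]:
        raise ValidationError("%r is not a valid network choice" % value)

enforce_choices(None, NETWORK_CHOICES)       # ok -- legacy "Private Only" maps to None
enforce_choices("bridged", NETWORK_CHOICES)  # ok
```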
diff --git a/xos/core/xoslib/methods/ceilometerview.py b/xos/core/xoslib/methods/ceilometerview.py
index 5e0ac35..a65bd11 100644
--- a/xos/core/xoslib/methods/ceilometerview.py
+++ b/xos/core/xoslib/methods/ceilometerview.py
@@ -1078,6 +1078,18 @@
query.append({"field": "meter", "op": "eq", "value": meter_name})
samples = sample_list(request, meter_name,
ceilometer_url=tenant_ceilometer_url, query=query, limit=limit)
+ if samples:
+ tenant_map = getTenantControllerTenantMap(request.user)
+ resource_map = get_resource_map(request, ceilometer_url=tenant_ceilometer_url)
+ for sample in samples:
+ if sample["project_id"] in tenant_map.keys():
+ sample["slice"] = tenant_map[sample["project_id"]]["slice"]
+ else:
+ sample["slice"] = sample["project_id"]
+ if sample["resource_id"] in resource_map.keys():
+ sample["resource_name"] = resource_map[sample["resource_id"]]
+ else:
+ sample["resource_name"] = sample["resource_id"]
return Response(samples)
class ServiceAdjustScale(APIView):
@@ -1085,7 +1097,7 @@
method_name = "serviceadjustscale"
def get(self, request, format=None):
- if (not request.user.is_authenticated()) or (not request.user.is_admin()):
+ if (not request.user.is_authenticated()) or (not request.user.is_admin):
raise PermissionDenied("You must be authenticated admin user in order to use this API")
service = request.QUERY_PARAMS.get('service', None)
slice_hint = request.QUERY_PARAMS.get('slice_hint', None)
@@ -1097,5 +1109,5 @@
if not services or (not services.get(name=service)):
raise XOSMissingField("Service not found")
service = services.get(name=service)
- service.adjust_scale(slice_hint, scale)
+ service.adjust_scale(slice_hint, int(scale))
return Response("Success")
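
The sample-annotation loop added to this file effectively does the following per sample; only the field names come from the code, the IDs and map contents below are made-up placeholders.

```python
# Made-up sample and maps; "slice" falls back to project_id and
# "resource_name" falls back to resource_id when no mapping is found.
sample = {"project_id": "3fa8c0", "resource_id": "b7c14d", "counter_volume": 42.0}
tenant_map = {"3fa8c0": {"slice": "mysite_ceilometer"}}
resource_map = {}

sample["slice"] = tenant_map.get(sample["project_id"], {}).get("slice", sample["project_id"])
sample["resource_name"] = resource_map.get(sample["resource_id"], sample["resource_id"])
# -> slice == "mysite_ceilometer", resource_name == "b7c14d"
```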
diff --git a/xos/model_policies/model_policy_Instance.py b/xos/model_policies/model_policy_Instance.py
index ffc9847..dd1a8d5 100644
--- a/xos/model_policies/model_policy_Instance.py
+++ b/xos/model_policies/model_policy_Instance.py
@@ -6,7 +6,7 @@
if instance.deleted:
return
- if (instance.isolation in ["container"]):
+ if (instance.isolation in ["container"]) and (instance.slice.network not in ["host", "bridged"]):
# Our current docker-on-metal network strategy requires that there be some
# VM on the server that connects to the networks, so that
# the containers can piggyback off of that configuration.
diff --git a/xos/observers/monitoring_channel/templates/ceilometer_pub_sub_consumer.py b/xos/observers/monitoring_channel/templates/ceilometer_pub_sub_consumer.py
index ecbabb9..7d111e3 100644
--- a/xos/observers/monitoring_channel/templates/ceilometer_pub_sub_consumer.py
+++ b/xos/observers/monitoring_channel/templates/ceilometer_pub_sub_consumer.py
@@ -7,6 +7,8 @@
import time, thread, threading
projects_map = {}
+xos_tenant_info_map = {}
+xos_instances_info_map = {}
UDP_IP = "0.0.0.0"
UDP_PORT = 12346
@@ -43,10 +45,10 @@
print ""
print ""
for project in projects_map.keys():
- print "project=%s, alarm_state=%s" % (project, projects_map[project]['alarm'])
+ print "service=%s slice=%s, alarm_state=%s" % (projects_map[project]['xos_tenant_info']['service'] if projects_map[project]['xos_tenant_info'] else None, projects_map[project]['xos_tenant_info']['slice'] if projects_map[project]['xos_tenant_info'] else project, projects_map[project]['alarm'])
for resource in projects_map[project]['resources'].keys():
- print "resource=%s" % resource
- for i in projects_map[project]['resources'][resource]:
+ print "resource=%s" % (projects_map[project]['resources'][resource]['xos_instance_info']['instance_name'] if projects_map[project]['resources'][resource]['xos_instance_info'] else resource)
+ for i in projects_map[project]['resources'][resource]['queue']:
print " time=%s val=%s" % ( i['timestamp'],i['counter_volume'])
def periodic_print():
@@ -64,23 +66,51 @@
SCALE_UP_ALARM = 'scale_up'
SCALE_DOWN_ALARM = 'scale_down'
-def getXosTenantInfo(project):
- print "SRIKANTH: Getting XOS info for openstack Project %s" % project
+def loadAllXosTenantInfo():
+ print "SRIKANTH: Loading all XOS tenant info"
url = "http://ctl:9999/xos/controllerslices/"
admin_auth=("padmin@vicci.org", "letmein") # use your XOS username and password
controller_slices = requests.get(url, auth=admin_auth).json()
for cslice in controller_slices:
- if cslice['tenant_id'] == project:
- print "SRIKANTH: Matching controller_slice=%s" % cslice['humanReadableName']
- slice = requests.get(cslice['slice'], auth=admin_auth).json()
- slice_name = slice['humanReadableName']
- print "SRIKANTH: Matching slice=%s" % slice_name
+ slice = requests.get(cslice['slice'], auth=admin_auth).json()
+ slice_name = slice['humanReadableName']
+ if slice['service']:
service = requests.get(slice['service'], auth=admin_auth).json()
service_name = service['humanReadableName']
- print "SRIKANTH: Matching service=%s" % service_name
- return {'service':service_name, 'slice':slice_name}
- logger.warn("SRIKANTH: Project %(project)s has no associated XOS slice" % {'project':project})
- return None
+ else:
+ service_name = None
+ xos_tenant_info_map[cslice['tenant_id']] = {'service':service_name, 'slice':slice_name}
+ print "SRIKANTH: Project: %s Service:%s Slice:%s" % (cslice['tenant_id'],service_name,slice_name)
+
+def loadAllXosInstanceInfo():
+ print "SRIKANTH: Loading all XOS instance info"
+ url = "http://130.127.133.87:9999/xos/instances/"
+ admin_auth=("padmin@vicci.org", "letmein") # use your XOS username and password
+ xos_instances = requests.get(url, auth=admin_auth).json()
+ for instance in xos_instances:
+ xos_instances_info_map[instance['instance_uuid']] = {'instance_name':instance['instance_name']}
+
+def getXosTenantInfo(project):
+ xos_tenant_info = xos_tenant_info_map.get(project, None)
+ if xos_tenant_info:
+ return xos_tenant_info
+ else:
+ loadAllXosTenantInfo()
+ xos_tenant_info = xos_tenant_info_map.get(project, None)
+ if not xos_tenant_info:
+ print "SRIKANTH: Project %s has no associated XOS slice" % project
+ return xos_tenant_info
+
+def getXosInstanceInfo(resource):
+ xos_instance_info = xos_instances_info_map.get(resource, None)
+ if xos_instance_info:
+ return xos_instance_info
+ else:
+ loadAllXosInstanceInfo()
+ xos_instance_info = xos_instances_info_map.get(resource, None)
+ if not xos_instance_info:
+ print "SRIKANTH: Resource %s has no associated XOS instance" % project
+ return xos_instance_info
def handle_adjust_scale(project, adjust):
if (adjust != 'up') and (adjust != 'down'):
@@ -93,7 +123,8 @@
if (current_instances >=2 and adjust == 'up'):
print "SRIKANTH: %s is running with already maximum instances and can not scale up further " % project
return
- xos_tenant = getXosTenantInfo(project)
+ #xos_tenant = getXosTenantInfo(project)
+ xos_tenant = projects_map[project]['xos_tenant_info']
if not xos_tenant:
print "SRIKANTH: Can not handle adjust_scale for Project %s because not associated with any slice" % project
return
@@ -111,8 +142,8 @@
def periodic_cpu_threshold_evaluator():
for project in projects_map.keys():
- aggregate_cpu_util = sum([resource_queue[-1]['counter_volume'] \
- for resource_queue in projects_map[project]['resources'].values()]) \
+ aggregate_cpu_util = sum([resource['queue'][-1]['counter_volume'] \
+ for resource in projects_map[project]['resources'].values()]) \
/len(projects_map[project]['resources'].keys())
if (projects_map[project]['alarm'] == INITIAL_STATE or
@@ -172,14 +203,17 @@
continue
if sample['project_id'] not in projects_map.keys():
projects_map[sample['project_id']] = {}
+ projects_map[sample['project_id']]['xos_tenant_info'] = getXosTenantInfo(sample['project_id'])
projects_map[sample['project_id']]['resources'] = {}
projects_map[sample['project_id']]['uthreadshold_count'] = 0
projects_map[sample['project_id']]['lthreadshold_count'] = 0
projects_map[sample['project_id']]['alarm'] = INITIAL_STATE
resource_map = projects_map[sample['project_id']]['resources']
if sample['resource_id'] not in resource_map.keys():
- resource_map[sample['resource_id']] = collections.deque(maxlen=10)
- samples_map = resource_map[sample['resource_id']]
+ resource_map[sample['resource_id']] = {}
+ resource_map[sample['resource_id']]['xos_instance_info'] = getXosInstanceInfo(sample['resource_id'])
+ resource_map[sample['resource_id']]['queue'] = collections.deque(maxlen=10)
+ samples_map = resource_map[sample['resource_id']]['queue']
sample = {'counter_name':sample['counter_name'],
'project_id':sample['project_id'],
'resource_id':sample['resource_id'],
@@ -195,6 +229,8 @@
if not monitoring_channel:
print 'SRIKANTH: XOS monitoring_channel is not created... Create it before using this app'
return
+ loadAllXosTenantInfo()
+ loadAllXosInstanceInfo()
thread.start_new(read_notification_from_ceilometer,(UDP_IP,UDP_PORT,))
ceilometer_url = monitoring_channel['ceilometer_url']
subscribe_data = {"sub_info":"cpu_util","app_id":"xos_auto_scale","target":"udp://10.11.10.1:12346"}
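
getXosTenantInfo() and getXosInstanceInfo() above both follow the same load-all-then-refresh-on-miss pattern; a generic sketch of it, with hypothetical function and variable names:

```python
# Hypothetical generic form of the lookup pattern used above.
def cached_lookup(key, cache, reload_all):
    info = cache.get(key)
    if info is None:
        reload_all()           # repopulate the whole map from the XOS REST API
        info = cache.get(key)  # still None if XOS has no matching object
    return info

cache = {}
def reload_all():
    cache.update({"abc": {"slice": "mysite_ceilometer"}})  # stand-in for a REST fetch

print(cached_lookup("abc", cache, reload_all))  # {'slice': 'mysite_ceilometer'}
```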
diff --git a/xos/observers/onos/scripts/dockerip.sh b/xos/observers/onos/scripts/dockerip.sh
index 7684f3e..4bf355d 100644
--- a/xos/observers/onos/scripts/dockerip.sh
+++ b/xos/observers/onos/scripts/dockerip.sh
@@ -1,3 +1,4 @@
#!/bin/sh
-docker inspect --format '{{ .NetworkSettings.IPAddress }}' $1
+docker inspect --format '{{ .NetworkSettings.IPAddress }}' $1 | tr -d '\n' | tr -d '\r'
+
diff --git a/xos/observers/onos/steps/sync_onosapp.py b/xos/observers/onos/steps/sync_onosapp.py
index 97bb8a6..0feb2e5 100644
--- a/xos/observers/onos/steps/sync_onosapp.py
+++ b/xos/observers/onos/steps/sync_onosapp.py
@@ -106,14 +106,23 @@
self.write_configs(o)
def get_extra_attributes(self, o):
+ instance = self.get_instance(o)
+
fields={}
fields["files_dir"] = o.files_dir
fields["appname"] = o.name
- fields["nat_ip"] = self.get_instance(o).get_ssh_ip()
+ fields["nat_ip"] = instance.get_ssh_ip()
fields["config_fns"] = o.config_fns
fields["rest_configs"] = o.rest_configs
- fields["dependencies"] = [x.strip() for x in o.dependencies.split(",")]
- fields["ONOS_container"] = "ONOS"
+ if o.dependencies:
+ fields["dependencies"] = [x.strip() for x in o.dependencies.split(",")]
+ else:
+ fields["dependencies"] = []
+
+ if (instance.isolation=="container"):
+ fields["ONOS_container"] = "%s-%s" % (instance.slice.name, str(instance.id))
+ else:
+ fields["ONOS_container"] = "ONOS"
return fields
def sync_fields(self, o, fields):
diff --git a/xos/observers/onos/steps/sync_onosapp.yaml b/xos/observers/onos/steps/sync_onosapp.yaml
index 9105a2e..496fad0 100644
--- a/xos/observers/onos/steps/sync_onosapp.yaml
+++ b/xos/observers/onos/steps/sync_onosapp.yaml
@@ -2,7 +2,7 @@
- hosts: {{ instance_name }}
gather_facts: False
connection: ssh
- user: ubuntu
+ user: {{ username }}
sudo: yes
vars:
appname: {{ appname }}
@@ -17,6 +17,20 @@
tasks:
+ - name: Get Docker IP
+ script: /opt/xos/observers/onos/scripts/dockerip.sh {{ ONOS_container }}
+ register: onosaddr
+
+ - name: Wait for ONOS to come up
+ wait_for:
+ host={{ '{{' }} onosaddr.stdout {{ '}}' }}
+ port={{ '{{' }} item {{ '}}' }}
+ state=present
+ with_items:
+ - 8101
+ - 8181
+ - 9876
+
- name: Config file directory
file:
path=/home/ubuntu/{{ appname }}/
@@ -49,9 +63,10 @@
- name: Wait for ONOS to install the apps
wait_for: timeout=15
+{% if dependencies %}
- name: Add dependencies to ONOS
uri:
- url: http://localhost:8181/onos/v1/applications/{{ '{{' }} item {{ '}}' }}/active
+ url: http://{{ '{{' }} onosaddr.stdout {{ '}}' }}:8181/onos/v1/applications/{{ '{{' }} item {{ '}}' }}/active
method: POST
user: karaf
password: karaf
@@ -59,13 +74,14 @@
{% for dependency in dependencies %}
- {{ dependency }}
{% endfor %}
+{% endif %}
{% if rest_configs %}
# Do this after services have been activated, or it will cause an exception.
# vOLT will re-read its net config; vbng may not.
- name: Add ONOS configuration values
uri:
- url: http://localhost:8181/{{ '{{' }} item.endpoint {{ '}}' }} #http://localhost:8181/onos/v1/network/configuration/
+ url: http://{{ '{{' }} onosaddr.stdout {{ '}}' }}:8181/{{ '{{' }} item.endpoint {{ '}}' }} #http://localhost:8181/onos/v1/network/configuration/
body: "{{ '{{' }} item.body {{ '}}' }}"
body_format: raw
method: POST
diff --git a/xos/observers/onos/steps/sync_onosservice.py b/xos/observers/onos/steps/sync_onosservice.py
index 65fa44e..420904a 100644
--- a/xos/observers/onos/steps/sync_onosservice.py
+++ b/xos/observers/onos/steps/sync_onosservice.py
@@ -63,10 +63,14 @@
def sync_fields(self, o, fields):
# the super causes the playbook to be run
-
super(SyncONOSService, self).sync_fields(o, fields)
def run_playbook(self, o, fields):
+ instance = self.get_instance(o)
+ if (instance.isolation=="container"):
+ # If the instance is already a container, then we don't need to
+ # install ONOS.
+ return
super(SyncONOSService, self).run_playbook(o, fields)
def delete_record(self, m):
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.py b/xos/observers/vcpe/steps/sync_vcpetenant.py
index 4f3886e..0985d22 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.py
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.py
@@ -22,6 +22,9 @@
logger = Logger(level=logging.INFO)
+PARENTAL_MECHANISM="dnsmasq"
+ENABLE_QUICK_UPDATE=False
+
class SyncVCPETenant(SyncInstanceUsingAnsible):
provides=[VCPETenant]
observes=VCPETenant
@@ -125,6 +128,15 @@
except:
full_setup = True
+ safe_macs=[]
+ if o.volt and o.volt.subscriber:
+ for user in o.volt.subscriber.users:
+ level = user.get("level",None)
+ mac = user.get("mac",None)
+ if level in ["G", "PG"]:
+ if mac:
+ safe_macs.append(mac)
+
fields = {"vlan_ids": vlan_ids, # XXX remove this
"s_tags": s_tags,
"c_tags": c_tags,
@@ -132,7 +144,8 @@
"cdn_prefixes": cdn_prefixes,
"bbs_addrs": bbs_addrs,
"full_setup": full_setup,
- "isolation": o.instance.isolation}
+ "isolation": o.instance.isolation,
+ "safe_browsing_macs": safe_macs}
# add in the sync_attributes that come from the SubscriberRoot object
@@ -169,45 +182,46 @@
url_filter_level = o.volt.subscriber.url_filter_level
url_filter_users = o.volt.subscriber.users
- # disable url_filter if there are no bbs_addrs
- if url_filter_enable and (not fields.get("bbs_addrs",[])):
- logger.info("disabling url_filter because there are no bbs_addrs")
- url_filter_enable = False
+ if PARENTAL_MECHANISM=="broadbandshield":
+ # disable url_filter if there are no bbs_addrs
+ if url_filter_enable and (not fields.get("bbs_addrs",[])):
+ logger.info("disabling url_filter because there are no bbs_addrs")
+ url_filter_enable = False
- if url_filter_enable:
- bbs_hostname = None
- if service.bbs_api_hostname and service.bbs_api_port:
- bbs_hostname = service.bbs_api_hostname
- else:
- # TODO: extract from slice
- bbs_hostname = "cordcompute01.onlab.us"
-
- if service.bbs_api_port:
- bbs_port = service.bbs_api_port
- else:
- bbs_port = 8018
-
- if not bbs_hostname:
- logger.info("broadbandshield is not configured")
- else:
- tStart = time.time()
- bbs = BBS(o.bbs_account, "123", bbs_hostname, bbs_port)
- bbs.sync(url_filter_level, url_filter_users)
-
- if o.hpc_client_ip:
- logger.info("associate account %s with ip %s" % (o.bbs_account, o.hpc_client_ip))
- bbs.associate(o.hpc_client_ip)
+ if url_filter_enable:
+ bbs_hostname = None
+ if service.bbs_api_hostname and service.bbs_api_port:
+ bbs_hostname = service.bbs_api_hostname
else:
- logger.info("no hpc_client_ip to associate")
+ # TODO: extract from slice
+ bbs_hostname = "cordcompute01.onlab.us"
- logger.info("bbs update time %d" % int(time.time()-tStart))
+ if service.bbs_api_port:
+ bbs_port = service.bbs_api_port
+ else:
+ bbs_port = 8018
+
+ if not bbs_hostname:
+ logger.info("broadbandshield is not configured")
+ else:
+ tStart = time.time()
+ bbs = BBS(o.bbs_account, "123", bbs_hostname, bbs_port)
+ bbs.sync(url_filter_level, url_filter_users)
+
+ if o.hpc_client_ip:
+ logger.info("associate account %s with ip %s" % (o.bbs_account, o.hpc_client_ip))
+ bbs.associate(o.hpc_client_ip)
+ else:
+ logger.info("no hpc_client_ip to associate")
+
+ logger.info("bbs update time %d" % int(time.time()-tStart))
def run_playbook(self, o, fields):
ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
quick_update = (o.last_ansible_hash == ansible_hash)
- if quick_update:
+ if ENABLE_QUICK_UPDATE and quick_update:
logger.info("quick_update triggered; skipping ansible recipe")
else:
if o.instance.isolation in ["container", "container_vm"]:
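
The safe_macs loop added above assumes each entry in o.volt.subscriber.users is a dict carrying "level" and "mac" keys; an illustrative shape with made-up values:

```python
# Illustrative subscriber users; only the "level"/"mac" keys are taken from the code above.
users = [
    {"level": "PG", "mac": "01:02:03:04:05:06"},
    {"level": "R",  "mac": "01:02:03:04:05:07"},
    {"level": "G",  "mac": None},
]
safe_macs = [u["mac"] for u in users
             if u.get("level") in ["G", "PG"] and u.get("mac")]
# -> ["01:02:03:04:05:06"]
```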
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.yaml b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
index f7cc02f..db6ecf5 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.yaml
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
@@ -50,6 +50,10 @@
rabbit_user: {{ rabbit_user }}
rabbit_password: {{ rabbit_password }}
rabbit_host: {{ rabbit_host }}
+ safe_browsing:
+ {% for mac in safe_browsing_macs %}
+ - {{ mac }}
+ {% endfor %}
tasks:
{% if full_setup %}
@@ -167,3 +171,4 @@
- name: start vcpe
service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
+
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml b/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
index bb64a71..960b480 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
+++ b/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
@@ -51,6 +51,10 @@
rabbit_user: {{ rabbit_user }}
rabbit_password: {{ rabbit_password }}
rabbit_host: {{ rabbit_host }}
+ safe_browsing:
+ {% for mac in safe_browsing_macs %}
+ - {{ mac }}
+ {% endfor %}
tasks:
- name: vCPE basic dnsmasq config
@@ -67,3 +71,4 @@
# Dnsmasq is automatically restarted in the container
- name: restart dnsmasq
shell: docker exec {{ container_name }} /usr/bin/killall dnsmasq
+
diff --git a/xos/openstack_observer/steps/sync_container.py b/xos/openstack_observer/steps/sync_container.py
index 039fb55..adb81c9 100644
--- a/xos/openstack_observer/steps/sync_container.py
+++ b/xos/openstack_observer/steps/sync_container.py
@@ -52,51 +52,54 @@
def get_ports(self, o):
i=0
ports = []
- for port in o.ports.all():
- if (not port.ip):
- # 'unmanaged' ports may have an ip, but no mac
- # XXX: are there any ports that have a mac but no ip?
- raise DeferredException("Port on network %s is not yet ready" % port.network.name)
+ if (o.slice.network in ["host", "bridged"]):
+ pass # no ports in host or bridged mode
+ else:
+ for port in o.ports.all():
+ if (not port.ip):
+ # 'unmanaged' ports may have an ip, but no mac
+ # XXX: are there any ports that have a mac but no ip?
+ raise DeferredException("Port on network %s is not yet ready" % port.network.name)
- pd={}
- pd["mac"] = port.mac or ""
- pd["ip"] = port.ip or ""
- pd["xos_network_id"] = port.network.id
+ pd={}
+ pd["mac"] = port.mac or ""
+ pd["ip"] = port.ip or ""
+ pd["xos_network_id"] = port.network.id
- if port.network.name == "wan_network":
- if port.ip:
- (a, b, c, d) = port.ip.split('.')
- pd["mac"] = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
+ if port.network.name == "wan_network":
+ if port.ip:
+ (a, b, c, d) = port.ip.split('.')
+ pd["mac"] = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
- if o.isolation == "container":
- # container on bare metal
- instance_port = self.get_instance_port(port)
- if not instance_port:
- raise DeferredException("No instance on slice for port on network %s" % port.network.name)
+ if o.isolation == "container":
+ # container on bare metal
+ instance_port = self.get_instance_port(port)
+ if not instance_port:
+ raise DeferredException("No instance on slice for port on network %s" % port.network.name)
- pd["snoop_instance_mac"] = instance_port.mac
- pd["snoop_instance_id"] = instance_port.instance.instance_id
- pd["src_device"] = ""
- pd["bridge"] = "br-int"
- else:
- # container in VM
- pd["snoop_instance_mac"] = ""
- pd["snoop_instance_id"] = ""
- pd["parent_mac"] = self.get_parent_port_mac(o, port)
- pd["bridge"] = ""
+ pd["snoop_instance_mac"] = instance_port.mac
+ pd["snoop_instance_id"] = instance_port.instance.instance_id
+ pd["src_device"] = ""
+ pd["bridge"] = "br-int"
+ else:
+ # container in VM
+ pd["snoop_instance_mac"] = ""
+ pd["snoop_instance_id"] = ""
+ pd["parent_mac"] = self.get_parent_port_mac(o, port)
+ pd["bridge"] = ""
- for (k,v) in port.get_parameters().items():
- pd[k] = v
+ for (k,v) in port.get_parameters().items():
+ pd[k] = v
- ports.append(pd)
+ ports.append(pd)
- # for any ports that don't have a device, assign one
- used_ports = [x["device"] for x in ports if ("device" in x)]
- avail_ports = ["eth%d"%i for i in range(0,64) if ("eth%d"%i not in used_ports)]
- for port in ports:
- if not port.get("device",None):
- port["device"] = avail_ports.pop(0)
+ # for any ports that don't have a device, assign one
+ used_ports = [x["device"] for x in ports if ("device" in x)]
+ avail_ports = ["eth%d"%i for i in range(0,64) if ("eth%d"%i not in used_ports)]
+ for port in ports:
+ if not port.get("device",None):
+ port["device"] = avail_ports.pop(0)
return ports
@@ -112,6 +115,7 @@
fields["volumes"] = [x.strip() for x in o.volumes.split(",")]
else:
fields["volumes"] = ""
+ fields["network_method"] = o.slice.network or "default"
return fields
def sync_record(self, o):
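
For reference, the wan_network MAC derivation kept in get_ports() works like this; the IP address below is made up.

```python
# Example of the 02:42:<ip-octets> MAC derivation used for wan_network ports.
ip = "10.6.1.131"               # made-up wan IP
a, b, c, d = ip.split(".")
mac = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
# -> "02:42:0a:06:01:83"
```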
diff --git a/xos/openstack_observer/steps/sync_container.yaml b/xos/openstack_observer/steps/sync_container.yaml
index b60ffb8..77e57cd 100644
--- a/xos/openstack_observer/steps/sync_container.yaml
+++ b/xos/openstack_observer/steps/sync_container.yaml
@@ -8,6 +8,7 @@
vars:
container_name: {{ container_name }}
docker_image: {{ docker_image }}
+ network_method: {{ network_method }}
ports:
{% for port in ports %}
- device: {{ port.device }}
diff --git a/xos/openstack_observer/templates/start-container.sh.j2 b/xos/openstack_observer/templates/start-container.sh.j2
index 260666c..2fbf478 100644
--- a/xos/openstack_observer/templates/start-container.sh.j2
+++ b/xos/openstack_observer/templates/start-container.sh.j2
@@ -44,7 +44,13 @@
if [ "$?" == 1 ]
then
docker pull $IMAGE
+{% if network_method=="host" %}
+ docker run -d --name=$CONTAINER --privileged=true --net=host $VOLUME_ARGS $IMAGE
+{% elif network_method=="bridged" %}
+ docker run -d --name=$CONTAINER --privileged=true --net=bridge $VOLUME_ARGS $IMAGE
+{% else %}
docker run -d --name=$CONTAINER --privileged=true --net=none $VOLUME_ARGS $IMAGE
+{% endif %}
else
docker start $CONTAINER
fi
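
A minimal sketch of how the new network_method branch renders; it requires the jinja2 package and trims the template string down to just the --net choice.

```python
# Sketch only: renders the --net flag chosen by the template branch above.
from jinja2 import Template

branch = Template(
    "{% if network_method=='host' %}--net=host"
    "{% elif network_method=='bridged' %}--net=bridge"
    "{% else %}--net=none{% endif %}"
)
print(branch.render(network_method="bridged"))  # --net=bridge
print(branch.render(network_method=None))       # --net=none
```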
diff --git a/xos/tosca/custom_types/xos.m4 b/xos/tosca/custom_types/xos.m4
index 10910fb..dc23c84 100644
--- a/xos/tosca/custom_types/xos.m4
+++ b/xos/tosca/custom_types/xos.m4
@@ -622,6 +622,17 @@
type: string
required: false
description: default isolation to use when bringing up instances (default to 'vm')
+ default_flavor:
+ # Note: we should probably formally introduce flavors to Tosca
+ # at some point, and use a requirement/relationship instead of
+ # a text string.
+ type: string
+ required: false
+ description: default flavor to use for slice
+ network:
+ type: string
+ required: false
+ description: type of networking to use for this slice
tosca.nodes.Node:
derived_from: tosca.nodes.Root
@@ -720,6 +731,10 @@
derived_from: tosca.relationships.Root
valid_target_types: [ tosca.capabilities.xos.Image ]
+ tosca.relationships.DefaultImage:
+ derived_from: tosca.relationships.Root
+ valid_target_types: [ tosca.capabilities.xos.Image ]
+
tosca.relationships.SupportsImage:
derived_from: tosca.relationships.Root
valid_target_types: [ tosca.capabilities.xos.Image ]
diff --git a/xos/tosca/custom_types/xos.yaml b/xos/tosca/custom_types/xos.yaml
index 4c12e7f..1256c3b 100644
--- a/xos/tosca/custom_types/xos.yaml
+++ b/xos/tosca/custom_types/xos.yaml
@@ -817,6 +817,17 @@
type: string
required: false
description: default isolation to use when bringing up instances (default to 'vm')
+ default_flavor:
+ # Note: we should probably formally introduce flavors to Tosca
+ # at some point, and use a requirement/relationship instead of
+ # a text string.
+ type: string
+ required: false
+ description: default flavor to use for slice
+ network:
+ type: string
+ required: false
+ description: type of networking to use for this slice
tosca.nodes.Node:
derived_from: tosca.nodes.Root
@@ -937,6 +948,10 @@
derived_from: tosca.relationships.Root
valid_target_types: [ tosca.capabilities.xos.Image ]
+ tosca.relationships.DefaultImage:
+ derived_from: tosca.relationships.Root
+ valid_target_types: [ tosca.capabilities.xos.Image ]
+
tosca.relationships.SupportsImage:
derived_from: tosca.relationships.Root
valid_target_types: [ tosca.capabilities.xos.Image ]
diff --git a/xos/tosca/resources/slice.py b/xos/tosca/resources/slice.py
index e37bfc8..ca11b77 100644
--- a/xos/tosca/resources/slice.py
+++ b/xos/tosca/resources/slice.py
@@ -5,14 +5,14 @@
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
-from core.models import Slice,User,Site,Network,NetworkSlice,SliceRole,SlicePrivilege,Service
+from core.models import Slice,User,Site,Network,NetworkSlice,SliceRole,SlicePrivilege,Service,Image,Flavor
from xosresource import XOSResource
class XOSSlice(XOSResource):
provides = "tosca.nodes.Slice"
xos_model = Slice
- copyin_props = ["enabled", "description", "slice_url", "max_instances", "default_isolation"]
+ copyin_props = ["enabled", "description", "slice_url", "max_instances", "default_isolation", "network"]
def get_xos_args(self):
args = super(XOSSlice, self).get_xos_args()
@@ -26,6 +26,16 @@
service = self.get_xos_object(Service, name=serviceName)
args["service"] = service
+ default_image_name = self.get_requirement("tosca.relationships.DefaultImage", throw_exception=False)
+ if default_image_name:
+ default_image = self.get_xos_object(Image, name=default_image_name, throw_exception=True)
+ args["default_image"] = default_image
+
+ default_flavor_name = self.get_property_default("default_flavor", None)
+ if default_flavor_name:
+ default_flavor = self.get_xos_object(Flavor, name=default_flavor_name, throw_exception=True)
+ args["default_flavor"] = default_flavor
+
return args
def postprocess(self, obj):
diff --git a/xos/tosca/samples/helloworld-chain.yaml b/xos/tosca/samples/helloworld-chain.yaml
index c72aa3d..8959f7f 100644
--- a/xos/tosca/samples/helloworld-chain.yaml
+++ b/xos/tosca/samples/helloworld-chain.yaml
@@ -16,6 +16,9 @@
mysite:
type: tosca.nodes.Site
+ trusty-server-multi-nic:
+ type: tosca.nodes.Image
+
service_vcpe:
type: tosca.nodes.Service
requirements:
@@ -50,6 +53,11 @@
- site:
node: mysite
relationship: tosca.relationships.MemberOfSite
+ - default_image:
+ node: trusty-server-multi-nic
+ relationship: tosca.relationships.DefaultImage
+ properties:
+ default_flavor: m1.small
helloworld_access:
type: tosca.nodes.network.Network
diff --git a/xos/tosca/samples/slice_default_image.yaml b/xos/tosca/samples/slice_default_image.yaml
new file mode 100644
index 0000000..91b95c7
--- /dev/null
+++ b/xos/tosca/samples/slice_default_image.yaml
@@ -0,0 +1,30 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+ * Create a Site and a reference to the trusty-server-multi-nic Image.
+ * Create a Slice in the Site with a default image and a default flavor (m1.small).
+
+imports:
+ - custom_types/xos.yaml
+
+topology_template:
+ node_templates:
+ mysite:
+ type: tosca.nodes.Site
+
+ trusty-server-multi-nic:
+ type: tosca.nodes.Image
+
+ mysite_test1:
+ type: tosca.nodes.Slice
+ requirements:
+ - slice:
+ node: mysite
+ relationship: tosca.relationships.MemberOfSite
+ - default_image:
+ node: trusty-server-multi-nic
+ relationship: tosca.relationships.DefaultImage
+ properties:
+ default_flavor: m1.small
+
diff --git a/xos/tosca/samples/vtn.yaml b/xos/tosca/samples/vtn.yaml
new file mode 100644
index 0000000..9cb7c95
--- /dev/null
+++ b/xos/tosca/samples/vtn.yaml
@@ -0,0 +1,83 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up the ONOS VTN controller service, its slice, and an ONOS container instance.
+
+imports:
+ - custom_types/xos.yaml
+
+topology_template:
+ node_templates:
+ service_ONOS_VTN:
+ type: tosca.nodes.ONOSService
+ requirements:
+ properties:
+ kind: onos
+ view_url: /admin/onos/onosservice/$id$/
+ public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+ artifacts:
+ pubkey: /opt/xos/observers/onos/onos_key.pub
+
+ VTN_ONOS_app:
+ type: tosca.nodes.ONOSvBNGApp
+ requirements:
+ - onos_tenant:
+ node: service_ONOS_VTN
+ relationship: tosca.relationships.TenantOfService
+ properties:
+# dependencies: org.onosproject.proxyarp, org.onosproject.virtualbng, org.onosproject.openflow, org.onosproject.fwd
+
+ # docker image for ONOS containers
+ docker-onos:
+ # TODO: need to attach this to mydeployment
+ type: tosca.nodes.Image
+ properties:
+ kind: container
+ container_format: na
+ disk_format: na
+ path: onosproject/onos
+
+ mysite:
+ type: tosca.nodes.Site
+
+ mysite_onos_vtn:
+ description: ONOS Controller Slice for VTN
+ type: tosca.nodes.Slice
+ requirements:
+ - ONOS:
+ node: service_ONOS_VTN
+ relationship: tosca.relationships.MemberOfService
+ - site:
+ node: mysite
+ relationship: tosca.relationships.MemberOfSite
+ - onos_docker_image:
+ node: docker-onos
+ relationship: tosca.relationships.UsesImage
+ properties:
+ network: bridged
+ default_isolation: container
+
+ # Virtual machines
+ onos_app_vtn:
+ type: tosca.nodes.Compute.Container
+ capabilities:
+ # Host container properties
+ host:
+ properties:
+ num_cpus: 1
+ disk_size: 10 GB
+ mem_size: 4 GB
+ # Guest Operating System properties
+ os:
+ properties:
+ # host Operating System image properties
+ architecture: x86_64
+ type: linux
+ distribution: Ubuntu
+ version: 14.10
+ requirements:
+ - slice:
+ node: mysite_onos_vtn
+ relationship: tosca.relationships.MemberOfSlice
+ - image:
+ node: docker-onos
+ relationship: tosca.relationships.UsesImage
diff --git a/xos/xos_configuration/xos_frontend_config b/xos/xos_configuration/xos_frontend_config
deleted file mode 100755
index 13fe53b..0000000
--- a/xos/xos_configuration/xos_frontend_config
+++ /dev/null
@@ -1,4 +0,0 @@
-[gui]
-branding_name=CORD
-branding_css=/static/cord.css
-branding_icon=/static/onos-logo.png