Add no_sync and autoconnect model fields and a TOSCA port resource; resolve merge conflicts
diff --git a/docker-cp.sh b/docker-cp.sh
old mode 100644
new mode 100755
diff --git a/xos/core/admin.py b/xos/core/admin.py
index afaf057..aa6f7f8 100644
--- a/xos/core/admin.py
+++ b/xos/core/admin.py
@@ -402,6 +402,7 @@
def queryset(self, request):
return Tag.select_by_user(request.user)
+
class NetworkLookerUpper:
""" This is a callable that looks up a network name in a instance and returns
the ip address for that network.
@@ -437,7 +438,7 @@
class InstanceInline(XOSTabularInline):
model = Instance
- fields = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name', 'slice', 'deployment', 'flavor', 'image', 'node']
+ fields = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name', 'slice', 'deployment', 'flavor', 'image', 'node', 'no_sync']
extra = 0
readonly_fields = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name']
suit_classes = 'suit-tab suit-tab-instances'
@@ -1275,7 +1276,7 @@
class InstanceAdmin(XOSBaseAdmin):
form = InstanceForm
fieldsets = [
- ('Instance Details', {'fields': ['backend_status_text', 'slice', 'deployment', 'node', 'all_ips_string', 'instance_id', 'instance_name', 'flavor', 'image', 'ssh_command'], 'classes': ['suit-tab suit-tab-general'], })
+ ('Instance Details', {'fields': ['backend_status_text', 'slice', 'deployment', 'node', 'all_ips_string', 'instance_id', 'instance_name', 'flavor', 'image', 'ssh_command', 'no_sync'], 'classes': ['suit-tab suit-tab-general'], })
]
readonly_fields = ('backend_status_text', 'ssh_command', 'all_ips_string')
list_display = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name', 'slice', 'flavor', 'image', 'node', 'deployment']
@@ -1738,14 +1739,19 @@
form=NetworkForm
fieldsets = [
- (None, {'fields': ['backend_status_text', 'name','template','ports','labels','owner','guaranteed_bandwidth', 'permit_all_slices','permitted_slices','network_id','router_id','subnet_id','subnet'],
+ (None, {'fields': ['backend_status_text', 'name','template','ports','labels',
+ 'owner','guaranteed_bandwidth', 'permit_all_slices',
+ 'permitted_slices','network_id','router_id','subnet_id',
+ 'subnet', 'autoconnect'],
'classes':['suit-tab suit-tab-general']}),
(None, {'fields': ['topology_parameters', 'controller_url', 'controller_parameters'],
'classes':['suit-tab suit-tab-sdn']}),
]
readonly_fields = ('backend_status_text', )
- user_readonly_fields = ['name','template','ports','labels','owner','guaranteed_bandwidth', 'permit_all_slices','permitted_slices','network_id','router_id','subnet_id','subnet']
+ user_readonly_fields = ['name','template','ports','labels','owner','guaranteed_bandwidth',
+ 'permit_all_slices','permitted_slices','network_id','router_id',
+ 'subnet_id','subnet','autoconnect']
@property
def suit_form_tabs(self):
diff --git a/xos/core/models/network.py b/xos/core/models/network.py
index edaffe2..6fce3cd 100644
--- a/xos/core/models/network.py
+++ b/xos/core/models/network.py
@@ -119,6 +119,8 @@
router_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum router id")
subnet_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum subnet id")
+ autoconnect = models.BooleanField(default=True, help_text="This network can be autoconnected to the slice that owns it")
+
def __unicode__(self): return u'%s' % (self.name)
def save(self, *args, **kwds):
diff --git a/xos/core/models/plcorebase.py b/xos/core/models/plcorebase.py
index 3eaf7e6..1a2c37c 100644
--- a/xos/core/models/plcorebase.py
+++ b/xos/core/models/plcorebase.py
@@ -152,6 +152,7 @@
deleted = models.BooleanField(default=False)
write_protect = models.BooleanField(default=False)
lazy_blocked = models.BooleanField(default=False)
+ no_sync = models.BooleanField(default=False)
class Meta:
# Changing abstract to False would require the managers of subclasses of
diff --git a/xos/core/models/user.py b/xos/core/models/user.py
index 37b3ca2..cb90145 100644
--- a/xos/core/models/user.py
+++ b/xos/core/models/user.py
@@ -337,16 +337,20 @@
def can_update_tenant_root_privilege(self, tenant_root_privilege, allow=[]):
return self.can_update_tenant_root(tenant_root_privilege.tenant_root, allow)
- def get_readable_objects(self, filter=None):
+ def get_readable_objects(self, filter_by=None):
""" Returns a list of objects that the user is allowed to read. """
- from core.models import Deployment, Network, Site, Slice, SliceTag, Instance, Tag, User
- models = [Deployment, Network, Site, Slice, SliceTag, Instance, Tag, User]
+ from core.models import Deployment, Flavor, Image, Network, NetworkTemplate, Node, PlModelMixIn, Site, Slice, SliceTag, Instance, Tag, User, DeploymentPrivilege, SitePrivilege, SlicePrivilege
+ models = []
+ if filter_by and isinstance(filter_by, list):
+ models = [m for m in filter_by if issubclass(m, PlModelMixIn)]
+ if not models:
+ models = [Deployment, Network, Site, Slice, SliceTag, Instance, Tag, User]
readable_objects = []
for model in models:
readable_objects.extend(model.select_by_user(self))
return readable_objects
- def get_permissions(self, filter=None):
+ def get_permissions(self, filter_by=None):
""" Return a list of objects for which the user has read or read/write
access. The object will be an instance of a django model object.
Permissions will be either 'r' or 'rw'.
@@ -358,9 +362,12 @@
list of dicts
"""
- from core.models import *
+ from core.models import Deployment, Flavor, Image, Network, NetworkTemplate, Node, PlModelMixIn, Site, Slice, SliceTag, Instance, Tag, User, DeploymentPrivilege, SitePrivilege, SlicePrivilege
READ = 'r'
READWRITE = 'rw'
+ models = []
+ if filter_by and isinstance(filter_by, list):
+ models = [m for m in filter_by if issubclass(m, PlModelMixIn)]
deployment_priv_objs = [Image, NetworkTemplate, Flavor]
site_priv_objs = [Node, Slice, User]
@@ -389,6 +396,9 @@
permissions = []
permission_dict = lambda x,y: {'object': x, 'permission': y}
for privilege_model, (model, affected_models) in privileg_map.items():
+ if models and model not in models:
+ continue
+
# get the objects affected by this privilege model
affected_objects = []
for affected_model in affected_models:
@@ -431,6 +441,11 @@
return permissions
+
+ def get_tenant_permissions(self):
+ from core.models import Site, Slice
+ return self.get_permissions(filter_by=[Site,Slice])
+
@staticmethod
def select_by_user(user):
diff --git a/xos/model_policies/model_policy_Slice.py b/xos/model_policies/model_policy_Slice.py
index b610601..ac63fca 100644
--- a/xos/model_policies/model_policy_Slice.py
+++ b/xos/model_policies/model_policy_Slice.py
@@ -33,15 +33,17 @@
# make sure slice has at least 1 public and 1 private networkd
public_nets = []
- private_net = None
+ private_nets = []
networks = Network.objects.filter(owner=slice)
for network in networks:
+ if not network.autoconnect:
+ continue
if network.template.name == 'Public dedicated IPv4':
public_nets.append(network)
elif network.template.name == 'Public shared IPv4':
public_nets.append(network)
elif network.template.name == 'Private':
- private_net = network
+ private_nets.append(network)
if not public_nets:
# ensure there is at least one public network, and default it to dedicated
nat_net = Network(
@@ -52,27 +54,28 @@
nat_net.save()
public_nets.append(nat_net)
- if not private_net:
+ if not private_nets:
private_net = Network(
- name = slice.name+'-private',
- template = NetworkTemplate.objects.get(name='Private'),
- owner = slice
+ name = slice.name+'-private',
+ template = NetworkTemplate.objects.get(name='Private'),
+ owner = slice
)
private_net.save()
+ private_nets = [private_net]
# create slice networks
public_net_slice = None
private_net_slice = None
- net_slices = NetworkSlice.objects.filter(slice=slice, network__in=[private_net]+public_nets)
+ net_slices = NetworkSlice.objects.filter(slice=slice, network__in=private_nets+public_nets)
for net_slice in net_slices:
if net_slice.network in public_nets:
public_net_slice = net_slice
- elif net_slice.network == private_net:
+ elif net_slice.network in private_nets:
private_net_slice = net_slice
if not public_net_slice:
public_net_slice = NetworkSlice(slice=slice, network=public_nets[0])
public_net_slice.save()
if not private_net_slice:
- private_net_slice = NetworkSlice(slice=slice, network=private_net)
+ private_net_slice = NetworkSlice(slice=slice, network=private_nets[0])
private_net_slice.save()
diff --git a/xos/openstack_observer/event_loop.py b/xos/openstack_observer/event_loop.py
index 46bd23a..13fadb8 100644
--- a/xos/openstack_observer/event_loop.py
+++ b/xos/openstack_observer/event_loop.py
@@ -349,7 +349,7 @@
try:
duration=time.time() - start_time
- logger.info('Executing step %s' % sync_step.__name__)
+ logger.info('Executing step %s, deletion=%s' % (sync_step.__name__, deletion))
print bcolors.OKBLUE + "Executing step %s" % sync_step.__name__ + bcolors.ENDC
failed_objects = sync_step(failed=list(self.failed_step_objects), deletion=deletion)
@@ -359,13 +359,13 @@
if failed_objects:
self.failed_step_objects.update(failed_objects)
- logger.info("Step %r succeeded" % step)
- print bcolors.OKGREEN + "Step %r succeeded" % step + bcolors.ENDC
+ logger.info("Step %r succeeded" % sync_step.__name__)
+ print bcolors.OKGREEN + "Step %r succeeded" % sync_step.__name__ + bcolors.ENDC
my_status = STEP_STATUS_OK
self.update_run_time(sync_step,deletion)
except Exception,e:
- print bcolors.FAIL + "Model step %r failed" % (step) + bcolors.ENDC
- logger.error('Model step %r failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!' % (step, e))
+ print bcolors.FAIL + "Model step %r failed" % (sync_step.__name__) + bcolors.ENDC
+ logger.error('Model step %r failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!' % (sync_step.__name__, e))
logger.log_exc(e)
self.failed_steps.append(S)
my_status = STEP_STATUS_KO
diff --git a/xos/openstack_observer/steps/sync_controller_networks.py b/xos/openstack_observer/steps/sync_controller_networks.py
index 4c8e934..54f2466 100644
--- a/xos/openstack_observer/steps/sync_controller_networks.py
+++ b/xos/openstack_observer/steps/sync_controller_networks.py
@@ -37,11 +37,11 @@
network_name = controller_network.network.name
subnet_name = '%s-%d'%(network_name,controller_network.pk)
cidr = self.alloc_subnet(controller_network.pk)
- slice = controller_network.network.slices.all()[0] # XXX: FIXME!!
+ slice = controller_network.network.owner
network_fields = {'endpoint':controller_network.controller.auth_url,
- 'admin_user':slice.creator.email, # XXX: FIXME
- 'tenant_name':slice.name, # XXX: FIXME
+ 'admin_user':slice.creator.email,
+ 'tenant_name':slice.name,
'admin_password':slice.creator.remote_password,
'name':network_name,
'subnet_name':subnet_name,
@@ -63,6 +63,7 @@
def sync_record(self, controller_network):
if (controller_network.network.template.name!='Private'):
+ logger.info("skipping network controller %s because it is not private" % controller_network)
# We only sync private networks
return
diff --git a/xos/openstack_observer/steps/sync_instances.py b/xos/openstack_observer/steps/sync_instances.py
index b42b092..08fce3d 100644
--- a/xos/openstack_observer/steps/sync_instances.py
+++ b/xos/openstack_observer/steps/sync_instances.py
@@ -55,8 +55,23 @@
if instance.slice.service and instance.slice.service.public_key:
pubkeys.add(instance.slice.service.public_key)
+ # Handle any ports that are already created and attached to the sliver.
+ # If we do have a port for a network, then add that network to an
+ # exclude list so we won't try to auto-create ports on that network
+ # when instantiating.
+ ports = []
+ exclude_networks = set()
+ exclude_templates = set()
+ for ns in instance.networkinstances.all():
+ if not ns.port_id:
+ raise Exception("Port %s on instance %s has no id; Try again later" % (str(ns), str(instance)) )
+ ports.append(ns.port_id)
+ exclude_networks.add(ns.network)
+ exclude_templates.add(ns.network.template)
+
nics = []
networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
+ networks = [n for n in networks if (n not in exclude_networks)]
controller_networks = ControllerNetwork.objects.filter(network__in=networks,
controller=instance.node.site_deployment.controller)
@@ -67,9 +82,11 @@
raise Exception("Private Network %s has no id; Try again later" % controller_network.network.name)
nics.append(controller_network.net_id)
- # now include network template
+ # Now include network templates, for those networks that use a
+ # shared_network_name.
network_templates = [network.template.shared_network_name for network in networks \
if network.template.shared_network_name]
+ network_templates = [nt for nt in network_templates if (nt not in exclude_templates)]
#driver = self.driver.client_driver(caller=instance.creator, tenant=instance.slice.name, controller=instance.controllerNetwork)
driver = self.driver.admin_driver(tenant='admin', controller=instance.node.site_deployment.controller)
@@ -78,7 +95,9 @@
if net['name'] in network_templates:
nics.append(net['id'])
- if (not nics):
+ # If the slice isn't connected to anything, then at least put it on
+ # the public network.
+ if (not nics) and (not ports):
for net in nets:
if net['name']=='public':
nics.append(net['id'])
@@ -127,6 +146,7 @@
'image_name':image_name,
'flavor_name':instance.flavor.name,
'nics':nics,
+ 'ports':ports,
'meta':metadata_update,
'user_data':r'%s'%escape(userData)}
diff --git a/xos/openstack_observer/steps/sync_network_instances.py b/xos/openstack_observer/steps/sync_network_instances.py
index b3b11e7..512f619 100644
--- a/xos/openstack_observer/steps/sync_network_instances.py
+++ b/xos/openstack_observer/steps/sync_network_instances.py
@@ -144,15 +144,38 @@
# that don't have neutron ports, and create them.
for networkInstance in NetworkInstance.objects.filter(port_id__isnull=True, instance__isnull=False):
#logger.info("working on networkinstance %s" % networkInstance)
- controller = instance.node.site_deployment.controller
+ controller = networkInstance.instance.node.site_deployment.controller
if controller:
cn=networkInstance.network.controllernetworks.filter(controller=controller)
if not cn:
logger.log_exc("no controllernetwork for %s" % networkInstance)
continue
cn=cn[0]
+ if cn.lazy_blocked:
+ cn.lazy_blocked=False
+ cn.save()
+ logger.info("deferring networkInstance %s because controllerNetwork was lazy-blocked" % networkInstance)
+ continue
+ if not cn.net_id:
+ logger.info("deferring networkInstance %s because controllerNetwork does not have a port-id yet" % networkInstance)
+ continue
try:
- driver = self.driver.admin_driver(controller = controller,tenant='admin')
+ # We need to use a client driver that specifies the tenant
+ # of the destination instance. Nova-compute will not connect
+ # ports to instances if the port's tenant does not match
+ # the instance's tenant.
+
+ # A bunch of stuff to compensate for OpenStackDriver.client_driver()
+ # not being in working condition.
+ from openstack.client import OpenStackClient
+ from openstack.driver import OpenStackDriver
+ caller = networkInstance.network.owner.creator
+ auth = {'username': caller.email,
+ 'password': caller.remote_password,
+ 'tenant': networkInstance.instance.slice.name} # networkInstance.network.owner.name}
+ client = OpenStackClient(controller=controller, **auth) # cacert=self.config.nova_ca_ssl_cert,
+ driver = OpenStackDriver(client=client)
+
port = driver.shell.quantum.create_port({"port": {"network_id": cn.net_id}})["port"]
networkInstance.port_id = port["id"]
if port["fixed_ips"]:
diff --git a/xos/openstack_observer/steps/sync_slivers.yaml b/xos/openstack_observer/steps/sync_slivers.yaml
index c543227..803a294 100644
--- a/xos/openstack_observer/steps/sync_slivers.yaml
+++ b/xos/openstack_observer/steps/sync_slivers.yaml
@@ -21,6 +21,9 @@
{% for net in nics %}
- net-id: {{ net }}
{% endfor %}
+ {% for port in ports %}
+ - port-id: {{ port }}
+ {% endfor %}
{% if meta %}
meta:
diff --git a/xos/openstack_observer/syncstep.py b/xos/openstack_observer/syncstep.py
index f5703f2..cc6026d 100644
--- a/xos/openstack_observer/syncstep.py
+++ b/xos/openstack_observer/syncstep.py
@@ -72,7 +72,7 @@
# for figuring out what objects are outstanding.
main_obj = self.observes
if (not deletion):
- objs = main_obj.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+ objs = main_obj.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None), Q(lazy_blocked=False), Q(no_sync=False))
else:
objs = main_obj.deleted_objects.all()
diff --git a/xos/tosca/engine.py b/xos/tosca/engine.py
index 086c6e1..3efb5ef 100644
--- a/xos/tosca/engine.py
+++ b/xos/tosca/engine.py
@@ -40,6 +40,8 @@
self.compute_dependencies()
+ self.deferred_sync = []
+
self.ordered_nodetemplates = []
self.ordered_names = self.topsort_dependencies()
print "ordered_names", self.ordered_names
@@ -133,12 +135,18 @@
for nodetemplate in self.ordered_nodetemplates:
self.execute_nodetemplate(user, nodetemplate)
+ for obj in self.deferred_sync:
+ self.log("Saving deferred sync obj %s" % obj)
+ obj.no_sync = False
+ obj.save()
+
def execute_nodetemplate(self, user, nodetemplate):
if nodetemplate.type in resources.resources:
cls = resources.resources[nodetemplate.type]
#print "work on", cls.__name__, nodetemplate.name
obj = cls(user, nodetemplate, self)
obj.create_or_update()
+ self.deferred_sync = self.deferred_sync + obj.deferred_sync
def destroy(self, user):
nodetemplates = self.ordered_nodetemplates
diff --git a/xos/tosca/imageselect.py b/xos/tosca/imageselect.py
index 2c0064f..0cf4da8 100644
--- a/xos/tosca/imageselect.py
+++ b/xos/tosca/imageselect.py
@@ -25,5 +25,9 @@
if found_imgs:
return found_imgs[0]
+ found_imgs=images.filter(name="trusty-server-multi-nic") # demo
+ if found_imgs:
+ return found_imgs[0]
+
raise Exception("Failed to find an acceptable image")
diff --git a/xos/tosca/resources/compute.py b/xos/tosca/resources/compute.py
index c856924..a169c5a 100644
--- a/xos/tosca/resources/compute.py
+++ b/xos/tosca/resources/compute.py
@@ -83,7 +83,9 @@
xos_args = self.get_xos_args(name=name, index=index)
instance = Instance(**xos_args)
instance.caller = self.user
+ instance.no_sync = True
instance.save()
+ self.deferred_sync.append(instance)
self.info("Created Instance '%s' on node '%s' using flavor '%s' and image '%s'" %
(str(instance), str(instance.node), str(instance.flavor), str(instance.image)))
diff --git a/xos/tosca/resources/network.py b/xos/tosca/resources/network.py
index 57180ae..96bc190 100644
--- a/xos/tosca/resources/network.py
+++ b/xos/tosca/resources/network.py
@@ -12,9 +12,11 @@
class XOSNetwork(XOSResource):
provides = ["tosca.nodes.network.Network", "tosca.nodes.network.Network.XOS"]
xos_model = Network
+ defaults = {"permit_all_slices": True}
def get_xos_args(self):
- args = {"name": self.nodetemplate.name}
+ args = {"name": self.nodetemplate.name,
+ "autoconnect": False,}
slice_name = self.get_requirement("tosca.relationships.MemberOfSlice")
if slice_name:
@@ -26,7 +28,7 @@
# copy simple string properties from the template into the arguments
for prop in ["ports", "labels", "permit_all_slices"]:
- v = self.get_property(prop)
+ v = self.get_property(prop, self.defaults.get(prop,None))
if v:
args[prop] = v
diff --git a/xos/tosca/resources/port.py b/xos/tosca/resources/port.py
new file mode 100644
index 0000000..435ba0f
--- /dev/null
+++ b/xos/tosca/resources/port.py
@@ -0,0 +1,61 @@
+import os
+import pdb
+import sys
+import tempfile
+sys.path.append("/opt/tosca")
+from translator.toscalib.tosca_template import ToscaTemplate
+
+from core.models import Instance,User,Network,NetworkTemplate,NetworkInstance
+
+from xosresource import XOSResource
+
+class XOSPort(XOSResource):
+ provides = ["tosca.nodes.network.Port"]
+ xos_model = NetworkInstance
+
+ def get_existing_objs(self):
+ # Port objects have no name, their unique key is (instance, network)
+ args = self.get_xos_args(throw_exception=False)
+ instance = args.get('instance',None)
+ network = args.get('network',None)
+ if (not instance) or (not network):
+ return []
+ return self.xos_model.objects.filter(**{'instance': instance, 'network': network})
+
+ def get_xos_args(self, throw_exception=True):
+ args = {}
+
+ instance_name = self.get_requirement("tosca.relationships.network.BindsTo")
+ if instance_name:
+ args["instance"] = self.get_xos_object(Instance, throw_exception, name=instance_name)
+
+ net_name = self.get_requirement("tosca.relationships.network.LinksTo")
+ if net_name:
+ args["network"] = self.get_xos_object(Network, throw_exception, name=net_name)
+
+ return args
+
+ def postprocess(self, obj):
+ pass
+
+ def create(self):
+ xos_args = self.get_xos_args()
+
+ if not xos_args.get("instance", None):
+ raise Exception("Must specify instance when creating port")
+ if not xos_args.get("network", None):
+ raise Exception("Must specify network when creating port")
+
+ port = NetworkInstance(**xos_args)
+ port.caller = self.user
+ port.save()
+
+ self.postprocess(port)
+
+ self.info("Created NetworkInstance '%s' connect instance '%s' to network %s" % (str(port), str(port.instance), str(port.network)))
+
+ def delete(self, obj):
+ super(XOSPort, self).delete(obj)
+
+
+
diff --git a/xos/tosca/resources/xosresource.py b/xos/tosca/resources/xosresource.py
index 11a4958..8507172 100644
--- a/xos/tosca/resources/xosresource.py
+++ b/xos/tosca/resources/xosresource.py
@@ -9,6 +9,7 @@
def __init__(self, user, nodetemplate, engine):
self.dirty = False
+ self.deferred_sync = []
self.user = user
self.nodetemplate = nodetemplate
self.engine = engine
@@ -51,13 +52,18 @@
else:
return {}
- def get_property(self, name):
- return self.nodetemplate.get_property_value(name)
+ def get_property(self, name, default=None):
+ v = self.nodetemplate.get_property_value(name)
+ if (v==None):
+ return default
+ return v
- def get_xos_object(self, cls, **kwargs):
+ def get_xos_object(self, cls, throw_exception=True, **kwargs):
objs = cls.objects.filter(**kwargs)
if not objs:
- raise Exception("Failed to find %s filtered by %s" % (cls.__name__, str(kwargs)))
+ if throw_exception:
+ raise Exception("Failed to find %s filtered by %s" % (cls.__name__, str(kwargs)))
+ return None
return objs[0]
def get_existing_objs(self):
diff --git a/xos/tosca/samples/two_slices_shared_private_net.yaml b/xos/tosca/samples/two_slices_shared_private_net.yaml
index abd4d4e..4646f9e 100644
--- a/xos/tosca/samples/two_slices_shared_private_net.yaml
+++ b/xos/tosca/samples/two_slices_shared_private_net.yaml
@@ -14,7 +14,7 @@
type: tosca.nodes.NetworkTemplate
producer_private_network:
- type: tosca.nodes.network.Network.XOS
+ type: tosca.nodes.network.Network
properties:
ip_version: 4
requirements:
@@ -96,11 +96,15 @@
node: producer_server
relationship: tosca.relationships.network.BindsTo
-# consumer_pvt_net_port:
-# type: tosca.nodes.network.Port
-# requirements:
-# - link: producer_private_network
-# - binding: consumer_server
+ consumer_pvt_net_port:
+ type: tosca.nodes.network.Port
+ requirements:
+ - link:
+ node: producer_private_network
+ relationship: tosca.relationships.network.LinksTo
+ - binding:
+ node: consumer_server
+ relationship: tosca.relationships.network.BindsTo