Refactor and modularize edgeconfig scripts
- Entirely new netbox helper functions, using pynetbox and objects,
instead of previous spaghetti code
- Allow for VM interfaces
- Allow device names to specify more than one segment of the DNS subdomain
- Split out forward and reverse DNS
- Fix issues with DHCP zone creation
- Support advertising NTP server via DHCP option
Playbooks
- Add QA, router, DNS, and user creation/config playbook
- Fix YAML formatting issues with playbooks
Change-Id: Id6c010ef1e122f4fd1bd97e9bb2128c4271947d0
diff --git a/Makefile b/Makefile
index 8619e8c..0ebfcac 100644
--- a/Makefile
+++ b/Makefile
@@ -18,7 +18,7 @@
YAML_FILES ?= $(shell find . -type d \( -path "./venv_onfansible" -o -path "./cookiecutters" -o -path "./ansible_collections" -o -path "./roles" \) -prune -o -type f \( -name '*.yaml' -o -name '*.yml' \) -print )
# all files with extensions
-PYTHON_FILES ?= $(wildcard filter_plugins/*.py lint_rules/*.py cookiecutters/*/hooks/*.py roles/*/filter_plugins/*.py)
+PYTHON_FILES ?= $(wildcard scripts/*.py filter_plugins/*.py lint_rules/*.py cookiecutters/*/hooks/*.py roles/*/filter_plugins/*.py)
.DEFAULT_GOAL := help
.PHONY: test lint yamllint ansiblelint license help
diff --git a/README.md b/README.md
index db1c6e2..50549c9 100644
--- a/README.md
+++ b/README.md
@@ -203,6 +203,9 @@
The `with_items` and other `with_*` iterators should be put at the end of the
task.
+Handlers should be named `<action>-<subject>` for consistency - examples:
+`restart-nginx` or `start-postgres`.
+
If you are iterating on lists that contains password or other secure data that
should not be leaked into the output, set `no_log: true` so the items being
iterated on are not printed.
diff --git a/inventory/example-netbox.yml b/inventory/example-netbox.yml
index dc31ea0..18fec73 100644
--- a/inventory/example-netbox.yml
+++ b/inventory/example-netbox.yml
@@ -19,7 +19,7 @@
## used with the netbox edgeconfig script
ip_prefixes:
- - "10.0.0.0/25"
- - "10.0.0.128/25"
- - "10.0.1.0/25"
- - "10.0.1.128/25"
+ - "10.0.0.0/25"
+ - "10.0.0.128/25"
+ - "10.0.1.0/25"
+ - "10.0.1.128/25"
diff --git a/inventory/group_vars/aethercompute.yml b/inventory/group_vars/aethercompute.yml
index e7d3560..819930c 100644
--- a/inventory/group_vars/aethercompute.yml
+++ b/inventory/group_vars/aethercompute.yml
@@ -1,23 +1,5 @@
---
-netprep_router: false
-netprep_netplan_file: "01-netcfg"
-
-netprep_netplan:
- ethernets:
- eno1:
- dhcp4: yes
- dhcp4-overrides:
- route-metric: 100
- enp175s0f0:
- dhcp4: yes
- dhcp4-overrides:
- route-metric: 200
- enp175s0f1:
- dhcp4: yes
- dhcp4-overrides:
- route-metric: 200
-
userlist:
- username: terraform
comment: "ONF Terraform User"
diff --git a/playbooks/aethercompute-playbook.yml b/playbooks/aethercompute-playbook.yml
index d88483a..ba97a0f 100644
--- a/playbooks/aethercompute-playbook.yml
+++ b/playbooks/aethercompute-playbook.yml
@@ -1,3 +1,4 @@
+---
# Ansible playbook to configure aether compute nodes
- name: Configure aether compute nodes
diff --git a/playbooks/aetherfabric-playbook.yml b/playbooks/aetherfabric-playbook.yml
index e5c0a36..5c47a5c 100644
--- a/playbooks/aetherfabric-playbook.yml
+++ b/playbooks/aetherfabric-playbook.yml
@@ -1,3 +1,4 @@
+---
# Ansible playbook to configure aether fabric switches
- name: Configure aether fabric switches
diff --git a/playbooks/aethermgmt-playbook.yml b/playbooks/aethermgmt-playbook.yml
index 39be1c4..6e9d2d9 100644
--- a/playbooks/aethermgmt-playbook.yml
+++ b/playbooks/aethermgmt-playbook.yml
@@ -1,3 +1,4 @@
+---
# Ansible playbook to configure an aether management nodes
- name: Configure an aether management node
@@ -7,6 +8,7 @@
- netprep
- nsd
- unbound
+ - chrony
- dhcpd
- nginx
- onieboot
diff --git a/playbooks/dns-playbook.yml b/playbooks/dns-playbook.yml
index 33c4c9b..5c69990 100644
--- a/playbooks/dns-playbook.yml
+++ b/playbooks/dns-playbook.yml
@@ -6,3 +6,4 @@
become: true
roles:
- nsd
+ - unbound
diff --git a/playbooks/ntp-playbook.yml b/playbooks/ntp-playbook.yml
new file mode 100644
index 0000000..9aeb92d
--- /dev/null
+++ b/playbooks/ntp-playbook.yml
@@ -0,0 +1,9 @@
+---
+# Ansible playbook to configure an aether management node
+
+- name: Configure an aether management node with chrony ntp
+ hosts: aethermgmt
+ become: true
+ roles:
+ - chrony
+ - dhcpd
diff --git a/playbooks/pxeboot-playbook.yml b/playbooks/pxeboot-playbook.yml
new file mode 100644
index 0000000..910ca3c
--- /dev/null
+++ b/playbooks/pxeboot-playbook.yml
@@ -0,0 +1,9 @@
+---
+# Ansible playbook to configure an iPXE pxeboot webserver
+
+- name: Configure a pxeboot chainload webserver for iPXE
+ hosts: static,pxeboot
+ become: true
+ roles:
+ - nginx
+ - pxeboot
diff --git a/playbooks/pxeboot-playboot.yml b/playbooks/pxeboot-playboot.yml
deleted file mode 100644
index 3cbdae0..0000000
--- a/playbooks/pxeboot-playboot.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-# Ansible playbook to configure a iPXE pxeboot webserver
-
-- name: Configure pxeboot
- hosts: static
- become: true
- roles:
- - pxeboot
diff --git a/playbooks/qa-playbook.yml b/playbooks/qa-playbook.yml
new file mode 100644
index 0000000..d81b35f
--- /dev/null
+++ b/playbooks/qa-playbook.yml
@@ -0,0 +1,26 @@
+---
+# Ansible playbook to configure users on a set of nodes
+
+- name: Configure users and extra software for QA tasks on the compute nodes
+ hosts: aethercompute:aethermgmt
+ become: true
+
+ roles:
+ - users
+
+ tasks:
+
+ - name: Install QA packages
+ apt:
+ name:
+ - "arping"
+ - "ethtool"
+ - "iperf"
+ - "ipvsadm"
+ - "tcpdump"
+ - "tcpreplay"
+ - "traceroute"
+ - "tshark"
+ state: "present"
+ update_cache: true
+ cache_valid_time: 3600
diff --git a/playbooks/router-playbook.yml b/playbooks/router-playbook.yml
new file mode 100644
index 0000000..ca614a1
--- /dev/null
+++ b/playbooks/router-playbook.yml
@@ -0,0 +1,15 @@
+---
+# Ansible playbook to configure a router
+
+- name: Configure a router
+ hosts: router
+ become: true
+ roles:
+# - netprep
+ - nsd
+ - unbound
+ - dhcpd
+# - nginx
+# - onieboot
+# - users
+
diff --git a/playbooks/users-playbook.yml b/playbooks/users-playbook.yml
new file mode 100644
index 0000000..1bcc8ef
--- /dev/null
+++ b/playbooks/users-playbook.yml
@@ -0,0 +1,9 @@
+---
+# Ansible playbook to configure users on a set of nodes
+
+- name: Configure users
+ hosts: all
+ become: true
+
+ roles:
+ - users
diff --git a/requirements.txt b/requirements.txt
index 03890a8..2c022db 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,8 +3,8 @@
# SPDX-FileCopyrightText: © 2020 Open Networking Foundation <support@opennetworking.org>
# SPDX-License-Identifier: Apache-2.0
-ansible-lint~=4.3.3
-ansible~=2.9.11
+ansible-lint~=4.3.7
+ansible~=2.10.7
bcrypt~=3.1.7
black~=19.10b0
cookiecutter~=1.7.2
@@ -14,9 +14,9 @@
molecule~=3.0.8
netaddr~=0.7.19
passlib~=1.7.2
-pylint~=2.5.3
-pynetbox~=5.1.0
+pylint~=2.7.2
+pynetbox~=5.3.1
python-vagrant~=0.5.15
reuse~=0.11.1
-yamllint~=1.24.2
+yamllint~=1.26.0
zxcvbn~=4.4.28
diff --git a/scripts/edgeconfig.py b/scripts/edgeconfig.py
new file mode 100644
index 0000000..5dc21fc
--- /dev/null
+++ b/scripts/edgeconfig.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+
+# SPDX-FileCopyrightText: © 2021 Open Networking Foundation <support@opennetworking.org>
+# SPDX-License-Identifier: Apache-2.0
+
+# edgeconfig.py
+# Given a yaml config file (same as ansible inventory for a site), create a
+# YAML file consumable by ansible as variables that configures the management
+# node for that site
+
+from __future__ import absolute_import
+
+import argparse
+import nbhelper
+import json
+import os
+import pprint
+
+from ruamel import yaml
+
+# main function that calls other functions
+if __name__ == "__main__":
+
+ # this is passed to argparse, key is option name, rest is kwargs
+ extra_args = {
+ "base_config": {
+ "default": os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "base_edgeconfig.yaml"
+ ),
+ "nargs": "?",
+ "type": argparse.FileType("r"),
+ "help": "Config (optional, default: base_edgeconfig.yaml)",
+ },
+ }
+
+ args = nbhelper.parse_cli_args(extra_args)
+ nbh = nbhelper.NBHelper(args)
+
+ # use base_config for additional items
+ yaml_out = yaml.safe_load(args.base_config.read())
+
+ dhcpd_subnets = []
+ dhcpd_interfaces = []
+
+ # reverse zones aggregate across RFC1918 IP prefix
+ dns_reverse_zones = nbhelper.NBDNSReverseZones()
+
+ for prefix in nbh.all_prefixes():
+
+ nbhelper.NBDNSForwardZone.get_fwd_zone(prefix)
+
+ dns_reverse_zones.add_prefix(prefix)
+
+ dhcpd_subnet = nbhelper.NBDHCPSubnet(prefix)
+
+ dhcpd_if = dhcpd_subnet.dhcpd_interface
+
+ if dhcpd_if and dhcpd_if not in dhcpd_interfaces:
+ dhcpd_interfaces.append(dhcpd_if)
+
+ dhcpd_subnets.append(dhcpd_subnet)
+
+ # yaml_out["devices"] = nbhelper.NBDevice.all_devs()
+ yaml_out["dns_forward_zones"] = nbhelper.NBDNSForwardZone.all_fwd_zones()
+ yaml_out["dns_reverse_zones"] = dns_reverse_zones
+ yaml_out["dhcpd_subnets"] = dhcpd_subnets
+ yaml_out["dhcpd_interfaces"] = dhcpd_interfaces
+
+ print(yaml.safe_dump(yaml_out, indent=2))
diff --git a/scripts/nbhelper.py b/scripts/nbhelper.py
new file mode 100644
index 0000000..75195e9
--- /dev/null
+++ b/scripts/nbhelper.py
@@ -0,0 +1,972 @@
+#!/usr/bin/env python3
+
+# SPDX-FileCopyrightText: © 2021 Open Networking Foundation <support@opennetworking.org>
+# SPDX-License-Identifier: Apache-2.0
+
+# nbhelper.py
+# Helper functions for building YAML output from Netbox API calls
+
+from __future__ import absolute_import
+
+import re
+import sys
+import argparse
+import logging
+import netaddr
+import pynetbox
+import requests
+
+from ruamel import yaml
+
+# create shared logger
+logging.basicConfig()
+logger = logging.getLogger("nbh")
+
+# to dump YAML properly, using internal representers
+# see also:
+# https://stackoverflow.com/questions/54378220/declare-data-type-to-ruamel-yaml-so-that-it-can-represen-serialize-it
+# https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/representer.py
+
+ydump = yaml.YAML(typ="safe")
+ydump.representer.add_representer(
+ pynetbox.models.dcim.Devices, yaml.SafeRepresenter.represent_dict
+)
+ydump.representer.add_representer(
+ pynetbox.models.dcim.Interfaces, yaml.SafeRepresenter.represent_dict
+)
+ydump.representer.add_representer(
+ pynetbox.models.ipam.Prefixes, yaml.SafeRepresenter.represent_dict
+)
+ydump.representer.add_representer(
+ pynetbox.core.response.Record, yaml.SafeRepresenter.represent_dict
+)
+ydump.representer.add_representer(
+ pynetbox.models.ipam.IpAddresses, yaml.SafeRepresenter.represent_dict
+)
+ydump.representer.add_representer(
+ pynetbox.core.api.Api, yaml.SafeRepresenter.represent_none
+)
+
+
+def parse_cli_args(extra_args={}):
+ """
+ parse CLI arguments. Can add extra arguments with a option:kwargs dict
+ """
+
+ parser = argparse.ArgumentParser(description="Netbox")
+
+ # Positional args
+ parser.add_argument(
+ "settings",
+ type=argparse.FileType("r"),
+ help="YAML Ansible inventory file w/NetBox API token",
+ )
+
+ parser.add_argument(
+ "--debug", action="store_true", help="Print additional debugging information"
+ )
+
+ if extra_args:
+ for ename, ekwargs in extra_args.items():
+ parser.add_argument(ename, **ekwargs)
+
+ args = parser.parse_args()
+
+ # only print log messages if debugging
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ else:
+ logger.setLevel(logging.INFO)
+
+ return args
+
+
+class AttrDict(dict):
+ def __init__(self, *args, **kwargs):
+ super(AttrDict, self).__init__(*args, **kwargs)
+ self.__dict__ = self
+
+
+class NBHelper:
+ def __init__(self, args):
+
+ self.settings = yaml.safe_load(args.settings.read())
+
+ self.nbapi = pynetbox.api(
+ self.settings["api_endpoint"], token=self.settings["token"], threading=True,
+ )
+
+ if not self.settings["validate_certs"]:
+
+ session = requests.Session()
+ session.verify = False
+ self.nbapi.http_session = session
+
+ self.nb_version = self.nbapi.version
+
+ def all_prefixes(self):
+ """
+ Return a list of prefix objects
+ """
+
+ p_items = []
+
+ segments = 1
+
+ if "prefix_segments" in self.settings:
+ segments = self.settings["prefix_segments"]
+
+ for prefix in self.settings["ip_prefixes"]:
+ p_items.append(NBPrefix.get_prefix(self.nbapi, prefix, segments))
+
+ return p_items
+
+ @classmethod
+ def check_name_dns(cls, name):
+
+ badchars = re.search("[^a-z0-9.-]", name.lower(), re.ASCII)
+
+ if badchars:
+ logger.error(
+ "DNS name '%s' has one or more invalid characters: '%s'",
+ name,
+ badchars.group(0),
+ )
+ sys.exit(1)
+
+ return name.lower()
+
+ @classmethod
+ def clean_name_dns(cls, name):
+ return re.sub("[^a-z0-9.-]", "-", name.lower(), 0, re.ASCII)
+
+
+@yaml.yaml_object(ydump)
+class NBPrefix:
+
+ prefixes = {}
+
+ def __init__(self, api, prefix, name_segments):
+
+ self.nbapi = api
+ self.prefix = prefix
+ self.name_segments = name_segments
+
+ # get prefix information
+ self.prefix_data = self.nbapi.ipam.prefixes.get(prefix=self.prefix)
+ self.domain_extension = NBHelper.check_name_dns(self.prefix_data.description)
+
+ logger.debug(
+ "prefix %s, domain_extension %s, data: %s",
+ self.prefix,
+ self.domain_extension,
+ dict(self.prefix_data),
+ )
+
+ # ip centric info
+ self.dhcp_range = None
+ self.reserved_ips = {}
+ self.aos = {}
+
+ # build item lists
+ self.build_prefix()
+
+ @classmethod
+ def all_prefixes(cls):
+ return cls.prefixes
+
+ @classmethod
+ def get_prefix(cls, api, prefix, name_segments=1):
+ if prefix in cls.prefixes:
+ return cls.prefixes[prefix]
+
+ return NBPrefix(api, prefix, name_segments)
+
+ def __repr__(self):
+ return str(self.prefix)
+
+ @classmethod
+ def to_yaml(cls, representer, node):
+ return representer.represent_dict(
+ {
+ "dhcp_range": node.dhcp_range,
+ "reserved_ips": node.reserved_ips,
+ "aos": node.aos,
+ "prefix_data": dict(node.prefix_data),
+ }
+ )
+
+ def parent(self):
+ """
+ Get the parent prefix to this prefix
+
+ FIXME: Doesn't handle multiple layers of prefixes, returns first found
+ """
+
+ # get all parents of this prefix (include self)
+ possible_parents = self.nbapi.ipam.prefixes.filter(contains=self.prefix)
+
+ logger.debug("Prefix %s: possible parents %s", self.prefix, possible_parents)
+
+ # filter out self, return first found
+ for pparent in possible_parents:
+ if pparent.prefix != self.prefix:
+ return NBPrefix.get_prefix(
+ self.nbapi, pparent.prefix, self.name_segments
+ )
+
+ return None
+
+ def build_prefix(self):
+ """
+ find ip information for items (devices/vms, reserved_ips, dhcp_range) in prefix
+ """
+
+ ips = self.nbapi.ipam.ip_addresses.filter(parent=self.prefix)
+
+ for ip in sorted(ips, key=lambda k: k["address"]):
+
+ logger.debug("prefix_item ip: %s, data: %s", ip, dict(ip))
+
+ # if it's a DHCP range, add that range to the dev list as prefix_dhcp
+ if ip.status.value == "dhcp":
+ self.dhcp_range = str(ip.address)
+ continue
+
+ # reserved IPs
+ if ip.status.value == "reserved":
+
+ res = {}
+ res["name"] = ip.description.lower().split(" ")[0]
+ res["description"] = ip.description
+ res["ip4"] = str(netaddr.IPNetwork(ip.address))
+ res["custom_fields"] = ip.custom_fields
+
+ self.reserved_ips[str(ip)] = res
+ continue
+
+ # devices and VMs
+ if ip.assigned_object: # can be null if not assigned to a device/vm
+ aotype = ip.assigned_object_type
+
+ if aotype == "dcim.interface":
+
+ self.aos[str(ip)] = NBDevice.get_dev(
+ self.nbapi, ip.assigned_object.device.id,
+ )
+
+ elif aotype == "virtualization.vminterface":
+ self.aos[str(ip)] = NBVirtualMachine.get_vm(
+ self.nbapi, ip.assigned_object.virtual_machine.id,
+ )
+
+ else:
+ logger.error("IP %s has unknown device type: %s", ip, aotype)
+ sys.exit(1)
+
+ else:
+ logger.warning("Unknown IP type %s, with attributes: %s", ip, dict(ip))
+
+
+@yaml.yaml_object(ydump)
+class NBAssignedObject:
+ """
+ Assigned Object is either a Device or Virtual Machine, which function
+ nearly identically in the NetBox data model.
+
+ This parent class holds common functions for those two child classes
+ """
+
+ def __init__(self, api):
+ self.nbapi = api
+
+ def dns_name(self, ip, prefix):
+ """
+ Returns the DNS name for the device at this IP in the prefix
+ """
+
+ def first_segment_suffix(split_name, suffixes, segments):
+ first_seg = "-".join([split_name[0], *suffixes])
+
+ if segments > 1:
+ name = ".".join([first_seg, *split_name[1:segments]])
+ else:
+ name = first_seg
+
+ return name
+
+ # clean/split the device name
+ name_split = NBHelper.clean_name_dns(self.data.name).split(".")
+
+ # always add interface suffix to mgmt interfaces
+ if self.interfaces_by_ip[ip].mgmt_only:
+ return first_segment_suffix(
+ name_split, [self.interfaces_by_ip[ip].name], prefix.name_segments
+ )
+
+ # find all IP's for this device in the prefix that aren't mgmt interfaces
+ prefix_ips = []
+ for s_ip in self.ips:
+ if s_ip in prefix.aos and not self.interfaces_by_ip[s_ip].mgmt_only:
+ prefix_ips.append(s_ip)
+
+ # name to use when only one IP address for device in a prefix
+ simple_name = ".".join(name_split[0 : prefix.name_segments])
+
+ # if more than one non-mgmt IP in prefix
+ if len(prefix_ips) > 1:
+
+ # use bare name if primary IP address
+ try: # skip if no primary_ip.address
+ if ip == self.data.primary_ip.address:
+ return simple_name
+ except AttributeError:
+ pass
+
+ # else, suffix with the interface name, and the last octet of IP address
+ return first_segment_suffix(
+ name_split,
+ [
+ self.interfaces_by_ip[ip].name,
+ str(netaddr.IPNetwork(ip).ip.words[3]),
+ ],
+ prefix.name_segments,
+ )
+
+ # simplest case - only one IP in prefix, return simple_name
+ return simple_name
+
+ def dns_cnames(self, ip):
+ """
+ returns a list of cnames for this object, based on IP matches
+ """
+
+ cnames = []
+
+ for service in self.services:
+
+ # if not assigned to any IP's, service is on all IPs
+ if not service.ipaddresses:
+ cnames.append(service.name)
+ continue
+
+ # If assigned to an IP, only create a CNAME on that IP
+ for service_ip in service.ipaddresses:
+ if ip == service_ip.address:
+ cnames.append(service.name)
+
+ return cnames
+
+ def has_service(self, cidr_ip, port, protocol):
+ """
+ Return True if this AO has a service using specific port and protocol combination
+ """
+
+ if (
+ cidr_ip in self.interfaces_by_ip
+ and not self.interfaces_by_ip[cidr_ip].mgmt_only
+ ):
+ for service in self.services:
+ if service.port == port and service.protocol.value == protocol:
+ return True
+
+ return False
+
+ def primary_iface(self):
+ """
+ Returns the interface data for the device that has the primary_ip
+ """
+
+ if self.data["primary_ip"]:
+ return self.interfaces_by_ip[self.data["primary_ip"]["address"]]
+
+ return None
+
+
+@yaml.yaml_object(ydump)
+class NBDevice(NBAssignedObject):
+ """
+ Wraps a single Netbox device
+ Also caches all known devices in a class variable (devs)
+ """
+
+ devs = {}
+
+ def __init__(self, api, dev_id):
+
+ super().__init__(api)
+
+ self.id = dev_id
+ self.data = self.nbapi.dcim.devices.get(dev_id)
+ self.services = self.nbapi.ipam.services.filter(device_id=dev_id)
+
+ # not filled in unless specifically asked for (expensive for a 48 port switch)
+ self.interfaces = []
+ self.mgmt_interfaces = []
+
+ # look up all IP's for this device
+ self.ips = {
+ str(ip): ip for ip in self.nbapi.ipam.ip_addresses.filter(device_id=dev_id)
+ }
+
+ # look up interfaces by IP
+ self.interfaces_by_ip = {}
+ for ip, ip_data in self.ips.items():
+ if ip_data.assigned_object:
+ self.interfaces_by_ip[ip] = self.nbapi.dcim.interfaces.get(
+ ip_data.assigned_object_id
+ )
+
+ logger.debug(
+ "NBDevice id: %d, data: %s, ips: %s", self.id, dict(self.data), self.ips,
+ )
+
+ self.devs[dev_id] = self
+
+ def __repr__(self):
+ return str(dict(self.data))
+
+ def get_interfaces(self):
+ if not self.interfaces:
+ self.interfaces = self.nbapi.dcim.interfaces.filter(device_id=self.id)
+
+ return self.interfaces
+
+ @classmethod
+ def get_dev(cls, api, dev_id):
+ if dev_id in cls.devs:
+ return cls.devs[dev_id]
+
+ return NBDevice(api, dev_id)
+
+ @classmethod
+ def all_devs(cls):
+ return cls.devs
+
+ @classmethod
+ def to_yaml(cls, representer, node):
+ return representer.represent_dict(
+ {
+ "data": node.data,
+ "services": node.services,
+ "ips": node.ips,
+ "interfaces_by_ip": node.interfaces_by_ip,
+ }
+ )
+
+
+@yaml.yaml_object(ydump)
+class NBVirtualMachine(NBAssignedObject):
+ """
+ VM equivalent of NBDevice
+ """
+
+ vms = {}
+
+ def __init__(self, api, vm_id):
+
+ super().__init__(api)
+
+ self.id = vm_id
+ self.data = self.nbapi.virtualization.virtual_machines.get(vm_id)
+ self.services = self.nbapi.ipam.services.filter(virtual_machine_id=vm_id)
+
+ # not filled in unless specifically asked for
+ self.interfaces = []
+
+ # look up all IP's for this device
+ self.ips = {
+ str(ip): ip
+ for ip in self.nbapi.ipam.ip_addresses.filter(virtual_machine_id=vm_id)
+ }
+
+ # look up interfaces by IP
+ self.interfaces_by_ip = {}
+ for ip, ip_data in self.ips.items():
+ if ip_data.assigned_object:
+ self.interfaces_by_ip[ip] = self.nbapi.virtualization.interfaces.get(
+ ip_data.assigned_object_id
+ )
+ # hack as VM interfaces lack this key, and needed for services
+ self.interfaces_by_ip[ip].mgmt_only = False
+
+ logger.debug(
+ "NBVirtualMachine id: %d, data: %s, ips: %s",
+ self.id,
+ dict(self.data),
+ self.ips,
+ )
+
+ self.vms[vm_id] = self
+
+ def __repr__(self):
+ return str(dict(self.data))
+
+ def get_interfaces(self):
+ if not self.interfaces:
+ self.interfaces = self.nbapi.virtualization.interfaces.filter(
+ virtual_machine_id=self.id
+ )
+
+ return self.interfaces
+
+ @classmethod
+ def get_vm(cls, api, vm_id):
+ if vm_id in cls.vms:
+ return cls.vms[vm_id]
+
+ return NBVirtualMachine(api, vm_id)
+
+ @classmethod
+ def all_vms(cls):
+ return cls.vms
+
+ @classmethod
+ def to_yaml(cls, representer, node):
+ return representer.represent_dict(
+ {
+ "data": node.data,
+ "services": node.services,
+ "ips": node.ips,
+ "interfaces_by_ip": node.interfaces_by_ip,
+ }
+ )
+
+
+@yaml.yaml_object(ydump)
+class NBDNSForwardZone:
+
+ fwd_zones = {}
+
+ def __init__(self, prefix):
+
+ self.domain_extension = prefix.domain_extension
+
+ self.a_recs = {}
+ self.cname_recs = {}
+ self.srv_recs = {}
+ self.ns_recs = []
+ self.txt_recs = {}
+
+ if prefix.dhcp_range:
+ self.create_dhcp_fwd(prefix.dhcp_range)
+
+ for ip, ao in prefix.aos.items():
+ self.add_ao_records(prefix, ip, ao)
+
+ for ip, res in prefix.reserved_ips.items():
+ self.add_reserved(ip, res)
+
+    # required for the add_fwd_cname function below
+ if callable(getattr(prefix, "parent")):
+ parent_prefix = prefix.parent()
+
+ if parent_prefix:
+ self.merge_parent_prefix(parent_prefix, prefix)
+
+ self.fwd_zones[self.domain_extension] = self
+
+ def __repr__(self):
+ return str(
+ {
+ "a": self.a_recs,
+ "cname": self.cname_recs,
+ "ns": self.ns_recs,
+ "srv": self.srv_recs,
+ "txt": self.txt_recs,
+ }
+ )
+
+ @classmethod
+ def add_fwd_cname(cls, cname, fqdn_dest):
+ """
+ Add an arbitrary CNAME (and possibly create the fwd zone if needed) pointing
+ at a FQDN destination name. It's used to support the per-IP "DNS name" field in NetBox
+ Note that the NS record
+ """
+
+ try:
+ fqdn_split = re.compile(r"([a-z]+)\.([a-z.]+)\.")
+ (short_name, extension) = fqdn_split.match(cname).groups()
+
+ except AttributeError:
+ logger.warning(
+ "Invalid DNS CNAME: '%s', must be in FQDN format: 'host.example.com.', ignored",
+ cname,
+ )
+ return
+
+ fake_prefix = AttrDict(
+ {
+ "domain_extension": extension,
+ "dhcp_range": None,
+ "aos": {},
+ "reserved_ips": {},
+ "parent": None,
+ }
+ )
+
+ fwd_zone = cls.get_fwd_zone(fake_prefix)
+
+ fwd_zone.cname_recs[short_name] = fqdn_dest
+
+ @classmethod
+ def get_fwd_zone(cls, prefix):
+ if prefix.domain_extension in cls.fwd_zones:
+ return cls.fwd_zones[prefix.domain_extension]
+
+ return NBDNSForwardZone(prefix)
+
+ @classmethod
+ def all_fwd_zones(cls):
+ return cls.fwd_zones
+
+ @classmethod
+ def to_yaml(cls, representer, node):
+ return representer.represent_dict(
+ {
+ "a": node.a_recs,
+ "cname": node.cname_recs,
+ "ns": node.ns_recs,
+ "srv": node.srv_recs,
+ "txt": node.txt_recs,
+ }
+ )
+
+ def fqdn(self, name):
+ return "%s.%s." % (name, self.domain_extension)
+
+ def create_dhcp_fwd(self, dhcp_range):
+
+ for ip in netaddr.IPNetwork(dhcp_range).iter_hosts():
+ self.a_recs["dhcp%03d" % (ip.words[3])] = str(ip)
+
+ def name_is_duplicate(self, name, target, record_type):
+ """
+ Returns True if name already exists in the zone as an A or CNAME
+ record, False otherwise
+ """
+
+ if name in self.a_recs:
+ logger.warning(
+ "Duplicate DNS record for name %s - A record to '%s', %s record to '%s'",
+ name,
+ self.a_recs[name],
+ record_type,
+ target,
+ )
+ return True
+
+ if name in self.cname_recs:
+ logger.warning(
+ "Duplicate DNS record for name %s - CNAME record to '%s', %s record to '%s'",
+ name,
+ self.cname_recs[name],
+ record_type,
+ target,
+ )
+ return True
+
+ return False
+
+ def add_ao_records(self, prefix, ip, ao):
+
+ name = ao.dns_name(ip, prefix)
+ target_ip = str(netaddr.IPNetwork(ip).ip) # make bare IP, not CIDR format
+
+ # add A records
+ if not self.name_is_duplicate(name, target_ip, "A"):
+ self.a_recs[name] = target_ip
+
+ # add CNAME records that alias to this name
+ for cname in ao.dns_cnames(ip):
+ # check that it isn't a dupe
+ if not self.name_is_duplicate(cname, target_ip, "CNAME"):
+ self.cname_recs[cname] = self.fqdn(name)
+
+ # add NS records if this is a DNS server
+ if ao.has_service(ip, 53, "udp"):
+ self.ns_recs.append(self.fqdn(name))
+
+ # if a DNS name is set, add it as a CNAME
+ if ao.ips[ip]["dns_name"]: # and ip == aos.data.primary_ip.address:
+ self.add_fwd_cname(ao.ips[ip]["dns_name"], self.fqdn(name))
+
+ def add_reserved(self, ip, res):
+
+ target_ip = str(netaddr.IPNetwork(ip).ip) # make bare IP, not CIDR format
+
+ if not self.name_is_duplicate(res["name"], target_ip, "A"):
+ self.a_recs[res["name"]] = target_ip
+
+ def merge_parent_prefix(self, pprefix, prefix):
+
+ # only if no NS records exist already
+ if not self.ns_recs:
+ # scan parent prefix for services
+ for ip, ao in pprefix.aos.items():
+
+ # Create a DNS within this prefix pointing to out-of-prefix IP
+ # where DNS server is
+ name = ao.dns_name(ip, prefix)
+ target_ip = str(
+ netaddr.IPNetwork(ip).ip
+ ) # make bare IP, not CIDR format
+
+ # add NS records if this is a DNS server
+ if ao.has_service(ip, 53, "udp"):
+ self.a_recs[name] = target_ip
+ self.ns_recs.append(self.fqdn(name))
+
+
+@yaml.yaml_object(ydump)
+class NBDNSReverseZones:
+ def __init__(self):
+
+ self.reverse_zones = {}
+
+ @classmethod
+ def to_yaml(cls, representer, node):
+ return representer.represent_dict(node.reverse_zones)
+
+ @classmethod
+ def canonicalize_rfc1918_prefix(cls, prefix):
+ """
+ RFC1918 prefixes need to be expanded to their widest canonical range to
+ group all reverse lookup domains together for reverse DNS with NSD/Unbound.
+ """
+
+ pnet = netaddr.IPNetwork(str(prefix))
+ (o1, o2, o3, o4) = pnet.network.words # Split ipv4 octets
+ cidr_plen = pnet.prefixlen
+
+ if o1 == 10:
+ o2 = o3 = o4 = 0
+ cidr_plen = 8
+ elif (o1 == 172 and o2 >= 16 and o2 <= 31) or (o1 == 192 and o2 == 168):
+ o3 = o4 = 0
+ cidr_plen = 16
+
+ return "%s/%d" % (".".join(map(str, [o1, o2, o3, o4])), cidr_plen)
+
+ def add_prefix(self, prefix):
+
+ canonical_prefix = self.canonicalize_rfc1918_prefix(prefix)
+
+ if canonical_prefix in self.reverse_zones:
+ rzone = self.reverse_zones[canonical_prefix]
+ else:
+ rzone = {
+ "ns": [],
+ "ptr": {},
+ }
+
+ if prefix.dhcp_range:
+ # FIXME: doesn't check for duplicate entries
+ rzone["ptr"].update(self.create_dhcp_rev(prefix))
+
+ for ip, ao in prefix.aos.items():
+ target_ip = str(netaddr.IPNetwork(ip).ip) # make bare IP, not CIDR format
+ ao_name = self.get_ao_name(ip, ao, prefix,)
+ rzone["ptr"][target_ip] = ao_name
+
+ # add NS records if this is a DNS server
+ if ao.has_service(ip, 53, "udp"):
+ rzone["ns"].append(ao_name)
+
+ parent_prefix = prefix.parent()
+
+ if parent_prefix:
+ self.merge_parent_prefix(rzone, parent_prefix)
+
+ self.reverse_zones[canonical_prefix] = rzone
+
+ def merge_parent_prefix(self, rzone, pprefix):
+
+ # parent items
+ p_ns = []
+
+ # scan parent prefix for services
+ for ip, ao in pprefix.aos.items():
+
+ ao_name = self.get_ao_name(ip, ao, pprefix,)
+
+ # add NS records if this is a DNS server
+ if ao.has_service(ip, 53, "udp"):
+ p_ns.append(ao_name)
+
+ # set DNS servers if none in rzone
+ if not rzone["ns"]:
+ rzone["ns"] = p_ns
+
+ def create_dhcp_rev(self, prefix):
+
+ dhcp_rzone = {}
+
+ for ip in netaddr.IPNetwork(prefix.dhcp_range).iter_hosts():
+ dhcp_rzone[str(ip)] = "dhcp%03d.%s." % (
+ ip.words[3],
+ prefix.domain_extension,
+ )
+
+ return dhcp_rzone
+
+ def get_ao_name(self, ip, ao, prefix):
+ short_name = ao.dns_name(ip, prefix)
+ return "%s.%s." % (short_name, prefix.domain_extension)
+
+
+@yaml.yaml_object(ydump)
+class NBDHCPSubnet:
+ def __init__(self, prefix):
+
+ self.domain_extension = prefix.domain_extension
+
+ self.subnet = None
+ self.range = None
+ self.first_ip = None
+ self.hosts = []
+ self.routers = []
+ self.dns_servers = []
+ self.dns_search = []
+ self.tftpd_server = None
+ self.ntp_servers = []
+ self.dhcpd_interface = None
+
+ self.add_prefix(prefix)
+
+ for ip, ao in prefix.aos.items():
+ self.add_ao(str(ip), ao, prefix)
+
+ parent_prefix = prefix.parent()
+
+ if parent_prefix:
+ self.merge_parent_prefix(parent_prefix)
+
+ def add_prefix(self, prefix):
+
+ self.subnet = str(prefix)
+
+ self.first_ip = str(netaddr.IPAddress(netaddr.IPNetwork(str(prefix)).first + 1))
+
+ self.dns_search = [prefix.domain_extension]
+
+ if prefix.dhcp_range:
+ self.range = prefix.dhcp_range
+
+ for ip, res in prefix.reserved_ips.items():
+ # routers are reserved IP's that start with 'router" in the IP description
+ if re.match("router", res["description"]):
+ router = {"ip": str(netaddr.IPNetwork(ip).ip)}
+
+ if (
+ "rfc3442routes" in res["custom_fields"]
+ and res["custom_fields"]["rfc3442routes"]
+ ):
+ # split on whitespace
+ router["rfc3442routes"] = re.split(
+ r"\s+", res["custom_fields"]["rfc3442routes"]
+ )
+
+ self.routers.append(router)
+
+ # set first IP to router if not set otherwise.
+ if not self.routers:
+ router = {"ip": self.first_ip}
+
+ self.routers.append(router)
+
+ def add_ao(self, ip, ao, prefix):
+
+ target_ip = str(netaddr.IPNetwork(ip).ip) # make bare IP, not CIDR format
+
+ # find the DHCP interface if it's this IP
+ if target_ip == self.first_ip:
+ self.dhcpd_interface = ao.interfaces_by_ip[ip].name
+
+ name = ao.dns_name(ip, prefix)
+
+ # add only devices that have a macaddr for this IP
+ if ip in ao.interfaces_by_ip:
+
+ mac_addr = dict(ao.interfaces_by_ip[ip]).get("mac_address")
+
+ if mac_addr and mac_addr.strip(): # if exists and not blank
+ self.hosts.append(
+ {"name": name, "ip_addr": target_ip, "mac_addr": mac_addr.lower(),}
+ )
+
+ # add dns servers
+ if ao.has_service(ip, 53, "udp"):
+ self.dns_servers.append(target_ip)
+
+ # add tftp server
+ if ao.has_service(ip, 69, "udp"):
+ if not self.tftpd_server:
+ self.tftpd_server = target_ip
+ else:
+ logger.warning(
+ "Duplicate TFTP servers in prefix, using first of %s and %s",
+ self.tftpd_server,
+ target_ip,
+ )
+
+ # add NTP servers
+ if ao.has_service(ip, 123, "udp"):
+ self.ntp_servers.append(target_ip)
+
+ def merge_parent_prefix(self, pprefix):
+
+ # parent items
+ p_dns_servers = []
+ p_tftpd_server = None
+ p_ntp_servers = []
+
+ # scan parent prefix for services
+ for ip, ao in pprefix.aos.items():
+
+ target_ip = str(netaddr.IPNetwork(ip).ip)
+
+ # add dns servers
+ if ao.has_service(ip, 53, "udp"):
+ p_dns_servers.append(target_ip)
+
+ # add tftp server
+ if ao.has_service(ip, 69, "udp"):
+ if not p_tftpd_server:
+ p_tftpd_server = target_ip
+ else:
+ logger.warning(
+ "Duplicate TFTP servers in parent prefix, using first of %s and %s",
+ p_tftpd_server,
+ target_ip,
+ )
+
+ # add NTP servers
+ if ao.has_service(ip, 123, "udp"):
+ p_ntp_servers.append(target_ip)
+
+ # merge if doesn't exist in prefix
+ if not self.dns_servers:
+ self.dns_servers = p_dns_servers
+
+ if not self.tftpd_server:
+ self.tftpd_server = p_tftpd_server
+
+ if not self.ntp_servers:
+ self.ntp_servers = p_ntp_servers
+
+ @classmethod
+ def to_yaml(cls, representer, node):
+ return representer.represent_dict(
+ {
+ "subnet": node.subnet,
+ "range": node.range,
+ "routers": node.routers,
+ "hosts": node.hosts,
+ "dns_servers": node.dns_servers,
+ "dns_search": node.dns_search,
+ "tftpd_server": node.tftpd_server,
+ "ntp_servers": node.ntp_servers,
+ }
+ )
diff --git a/scripts/netbox_edgeconfig.py b/scripts/netbox_edgeconfig.py
deleted file mode 100644
index 9f4e9d6..0000000
--- a/scripts/netbox_edgeconfig.py
+++ /dev/null
@@ -1,643 +0,0 @@
-#!/usr/bin/env python3
-
-# SPDX-FileCopyrightText: © 2020 Open Networking Foundation <support@opennetworking.org>
-# SPDX-License-Identifier: Apache-2.0
-
-# netbox_edgeconfig.py
-# given a s
-
-from __future__ import absolute_import
-
-import argparse
-import json
-import logging
-import netaddr
-import os
-import re
-import ssl
-import urllib.parse
-import urllib.request
-from ruamel import yaml
-
-# create shared logger
-logging.basicConfig()
-logger = logging.getLogger("nbec")
-
-# global dict of jsonpath expressions -> compiled jsonpath parsers, as
-# reparsing expressions in each loop results in 100x longer execution time
-jpathexpr = {}
-
-# headers to pass, set globally
-headers = []
-
-# settings
-settings = {}
-
-# cached data from API
-device_interface_cache = {}
-device_services_cache = {}
-interface_mac_cache = {}
-
-# parent prefixes
-parent_prefixes = {}
-
-
-def parse_nb_args():
- """
- parse CLI arguments
- """
-
- parser = argparse.ArgumentParser(description="NetBox Edge Config")
-
- # Positional args
- parser.add_argument(
- "settings",
- type=argparse.FileType("r"),
- help="YAML ansible inventory file w/NetBox API token",
- )
-
- parser.add_argument(
- "--debug", action="store_true", help="Print additional debugging information"
- )
-
- return parser.parse_args()
-
-
-def json_api_get(
- url,
- headers,
- data=None,
- trim_prefix=False,
- allow_failure=False,
- validate_certs=False,
-):
- """
- Call JSON API endpoint, return data as a dict
- """
-
- logger.debug("json_api_get url: %s", url)
-
- # if data included, encode it as JSON
- if data:
- data_enc = str(json.dumps(data)).encode("utf-8")
-
- request = urllib.request.Request(url, data=data_enc, method="POST")
- request.add_header("Content-Type", "application/json; charset=UTF-8")
- else:
- request = urllib.request.Request(url)
-
- # add headers tuples
- for header in headers:
- request.add_header(*header)
-
- try:
-
- if validate_certs:
- response = urllib.request.urlopen(request)
-
- else:
- ctx = ssl.create_default_context()
- ctx.check_hostname = False
- ctx.verify_mode = ssl.CERT_NONE
-
- response = urllib.request.urlopen(request, context=ctx)
-
- except urllib.error.HTTPError:
- # asking for data that doesn't exist results in a 404, just return nothing
- if allow_failure:
- return None
- logger.exception("Server encountered an HTTPError at URL: '%s'", url)
- except urllib.error.URLError:
- logger.exception("An URLError occurred at URL: '%s'", url)
- else:
- # docs: https://docs.python.org/3/library/json.html
- jsondata = response.read()
- logger.debug("API response: %s", jsondata)
-
- try:
- data = json.loads(jsondata)
- except json.decoder.JSONDecodeError:
- # allow return of no data
- if allow_failure:
- return None
- logger.exception("Unable to decode JSON")
- else:
- logger.debug("JSON decoded: %s", data)
-
- return data
-
-
-def create_dns_zone(extension, devs, parent_devs={}):
- # Checks for dns entries
-
- a_recs = {} # PTR records created by inverting this
- cname_recs = {}
- srv_recs = {}
- ns_recs = []
- txt_recs = {}
-
- # scan through devs and look for dns_name, if not, make from name and
- # extension
- for name, value in devs.items():
-
- # add DNS entries for every DHCP host if there's a DHCP range
- # DHCP addresses are of the form dhcp###.extension
- if name == "prefix_dhcp":
- for ip in netaddr.IPNetwork(value["dhcp_range"]).iter_hosts():
- a_recs["dhcp%03d" % (ip.words[3])] = str(ip)
-
- continue
-
- # require DNS names to only use ASCII characters (alphanumeric, lowercase, with dash/period)
- # _'s are used in SRV/TXT records, but in general use aren't recommended
- dns_name = re.sub("[^a-z0-9.-]", "-", name, 0, re.ASCII)
-
- # Add as an A record (and inverse, PTR record), only if it's a new name
- if dns_name not in a_recs:
- a_recs[dns_name] = value["ip4"]
- else:
- # most likely a data entry error
- logger.warning(
- "Duplicate DNS name '%s' for devices at IP: '%s' and '%s', ignoring",
- dns_name,
- a_recs[dns_name],
- value["ip4"],
- )
- continue
-
- # if a DNS name is given as a part of the IP address, it's viewed as a CNAME
- if value["dns_name"]:
-
- if re.search("%s$" % extension, value["dns_name"]):
-
- # strip off the extension, and add as a CNAME
- dns_cname = value["dns_name"].split(".%s" % extension)[0]
-
- elif "." in value["dns_name"]:
- logger.warning(
- "Device '%s' has a IP assigned DNS name '%s' outside the prefix extension: '%s', ignoring",
- name,
- value["dns_name"],
- extension,
- )
- continue
-
- else:
- dns_cname = value["dns_name"]
-
- if dns_cname == dns_name:
- logger.warning(
- "DNS Name field '%s' is identical to device name '%s', ignoring",
- value["dns_name"],
- dns_name,
- )
- else:
- cname_recs[dns_cname] = "%s.%s." % (dns_name, extension)
-
- # Add services as cnames, and possibly ns records
- for svc in value["services"]:
-
- # only add service if it uses the IP of the host
- if value["ip4"] in svc["ip4s"]:
- cname_recs[svc["name"]] = "%s.%s." % (dns_name, extension)
-
- if svc["port"] == 53 and svc["protocol"] == "udp":
- ns_recs.append("%s.%s." % (dns_name, extension))
-
- # iterate over the parent devs to add additional nameservers
- for pname, pval in parent_devs.items():
- if "services" in pval:
- for svc in pval["services"]:
- # look for DNS servers
- if svc["port"] == 53 and svc["protocol"] == "udp":
- # make name
- dns_name = re.sub("[^a-z0-9.-]", "-", pname, 0, re.ASCII)
-
- # add an a record for this nameserver if IP is outside of subnet
- a_recs[dns_name] = pval["ip4"]
-
- # add a NS record if it doesn't already exist
- ns_name = "%s.%s." % (dns_name, extension)
- if ns_name not in ns_recs:
- ns_recs.append(ns_name)
-
- return {
- "a": a_recs,
- "cname": cname_recs,
- "ns": ns_recs,
- "srv": srv_recs,
- "txt": txt_recs,
- }
-
-
-def create_dhcp_subnet(prefix, prefix_search, devs, parent_devs={}):
- # makes DHCP subnet information
-
- subnet = {}
-
- subnet["subnet"] = prefix
- subnet["dns_search"] = [prefix_search]
-
- def dhcp_iterate(devs):
- # inner function to iterate over a dev list
- ihosts = []
- idyn_range = None
- irouter = []
- idns_servers = []
- itftpd_server = None
-
- for name, value in devs.items():
-
- # handle a DHCP range
- if name == "prefix_dhcp":
- idyn_range = value["dhcp_range"]
- continue
-
- # handle a router reservation
- if name == "router":
- ir = {"ip": value["ip4"]}
- if (
- "rfc3442routes" in value["custom_fields"]
- and value["custom_fields"]["rfc3442routes"]
- ):
- ir["rfc3442routes"] = value["custom_fields"]["rfc3442routes"].split(
- ","
- )
-
- irouter.append(ir)
- continue
-
- # has a MAC address, and it's not null
- if "macaddr" in value and value["macaddr"]:
-
- ihosts.append(
- {
- "name": name,
- "ip_addr": value["ip4"],
- "mac_addr": value["macaddr"].lower(),
- }
- )
-
- # Add dns based on service entries
- if "services" in value:
- for svc in value["services"]:
-
- # add DNS server
- if svc["port"] == 53 and svc["protocol"] == "udp":
- idns_servers.append(value["ip4"])
-
- # add tftp server
- if svc["port"] == 69 and svc["protocol"] == "udp":
- itftpd_server = value["ip4"]
-
- return (ihosts, idyn_range, irouter, idns_servers, itftpd_server)
-
- # run inner function and build
- hosts, dyn_range, router, dns_servers, tftpd_server = dhcp_iterate(devs)
-
- # assign only hosts, dynamic range, based on the prefix
- subnet["hosts"] = hosts
- subnet["range"] = dyn_range
-
- # only assign router if specified
- if router:
- subnet["routers"] = router
-
- # find parent prefix devices, to fill in where needed
- phosts, pdyn_range, prouter, pdns_servers, ptftpd_server = dhcp_iterate(parent_devs)
-
- # use parent prefix devices if dns/tftp services needed aren't found within prefix
- if dns_servers:
- subnet["dns_servers"] = dns_servers
- else:
- subnet["dns_servers"] = pdns_servers
-
- if tftpd_server:
- subnet["tftpd_server"] = tftpd_server
- else:
- subnet["tftpd_server"] = ptftpd_server
-
- return subnet
-
-
-def find_dhcpd_interface(prefix, devs):
- # DHCPd interface is first usable IP in range
-
- first_ip = str(netaddr.IPAddress(netaddr.IPNetwork(prefix).first + 1))
-
- # look for interface corresponding to first IP address in range
- for name, value in devs.items():
- if "ip4" in value:
- if value["ip4"] == first_ip:
- return value["iface"]
-
- # if interface not found, return None and ignore
- return None
-
-
-def get_device_services(device_id, filters=""):
-
- if device_id in device_services_cache:
- return device_services_cache[device_id]
-
- # get services info
- url = "%s%s" % (
- settings["api_endpoint"],
- "api/ipam/services/?device_id=%s%s" % (device_id, filters),
- )
-
- raw_svcs = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- services = []
-
- for rsvc in raw_svcs["results"]:
-
- svc = {}
-
- svc["name"] = rsvc["name"]
- svc["description"] = rsvc["description"]
- svc["port"] = rsvc["port"]
- svc["protocol"] = rsvc["protocol"]["value"]
- svc["ip4s"] = []
-
- for ip in rsvc["ipaddresses"]:
- svc["ip4s"].append(str(netaddr.IPNetwork(ip["address"]).ip))
-
- services.append(svc)
-
- device_services_cache[device_id] = services
- return services
-
-
-def get_interface_mac_addr(interface_id):
- # return a mac addres, or None if undefined
- if interface_id in interface_mac_cache:
- return interface_mac_cache[interface_id]
-
- # get the interface info
- url = "%s%s" % (settings["api_endpoint"], "api/dcim/interfaces/%s/" % interface_id)
-
- iface = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- if iface["mac_address"]:
- interface_mac_cache[interface_id] = iface["mac_address"]
- return iface["mac_address"]
-
- interface_mac_cache[interface_id] = None
- return None
-
-
-def get_device_interfaces(device_id, filters=""):
-
- if device_id in device_interface_cache:
- return device_interface_cache[device_id]
-
- url = "%s%s" % (
- settings["api_endpoint"],
- "api/dcim/interfaces/?device_id=%s%s&mgmt_only=true" % (device_id, filters),
- )
-
- logger.debug("raw_ifaces_url: %s", url)
-
- raw_ifaces = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- logger.debug("raw_ifaces: %s", raw_ifaces)
-
- ifaces = []
-
- for raw_iface in raw_ifaces["results"]:
-
- iface = {}
-
- iface["name"] = raw_iface["name"]
- iface["macaddr"] = raw_iface["mac_address"]
- iface["mgmt_only"] = raw_iface["mgmt_only"]
- iface["description"] = raw_iface["description"]
-
- if raw_iface["count_ipaddresses"]:
- url = "%s%s" % (
- settings["api_endpoint"],
- "api/ipam/ip-addresses/?interface_id=%s" % raw_iface["id"],
- )
-
- raw_ip = json_api_get(
- url, headers, validate_certs=settings["validate_certs"]
- )
-
- iface["ip4"] = str(netaddr.IPNetwork(raw_ip["results"][0]["address"]).ip)
-
- ifaces.append(iface)
-
- device_interface_cache[device_id] = ifaces
- return ifaces
-
-
-def get_prefix_devices(prefix, filters=""):
-
- # get all devices in a prefix
- url = "%s%s" % (
- settings["api_endpoint"],
- "api/ipam/ip-addresses/?parent=%s%s" % (prefix, filters),
- )
-
- raw_ips = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- logger.debug("raw_ips: %s", raw_ips)
-
- devs = {}
-
- # iterate by IP, sorted
- for ip in sorted(raw_ips["results"], key=lambda k: k["address"]):
-
- logger.debug("ip: %s", ip)
-
- # if it's a DHCP range, add that range to the dev list as prefix_dhcp
- if ip["status"]["value"] == "dhcp":
- devs["prefix_dhcp"] = {"dhcp_range": ip["address"]}
- continue
-
- # if it's a reserved IP
- if ip["status"]["value"] == "reserved":
- res = {}
-
- res["type"] = "reserved"
- res["description"] = ip["description"]
- res["ip4"] = str(netaddr.IPNetwork(ip["address"]).ip)
- res["dns_name"] = ip["dns_name"] if "dns_name" in ip else "None"
- res["services"] = {}
- res["custom_fields"] = ip["custom_fields"]
-
- resname = res["description"].lower().split(" ")[0]
-
- devs[resname] = res
- continue
-
- aotype = ip["assigned_object_type"]
-
- # don't handle VM's yet
- if aotype == "virtualization.vminterface":
- continue
-
- dev = {}
-
- dev["type"] = "device"
- dev["ip4"] = str(netaddr.IPNetwork(ip["address"]).ip)
- dev["macaddr"] = get_interface_mac_addr(ip["assigned_object"]["id"])
-
- ifaces = get_device_interfaces(ip["assigned_object"]["device"]["id"])
-
- if ifaces and dev["ip4"] == ifaces[0]["ip4"]: # this is a mgmt IP
- devname = "%s-%s" % (
- ip["assigned_object"]["device"]["name"].lower().split(".")[0],
- ifaces[0]["name"],
- )
- dev["iface"] = ip["assigned_object"]["name"]
- dev["dns_name"] = ""
- dev["services"] = []
-
- else: # this is a primary IP
-
- name = ip["assigned_object"]["device"]["name"]
- devname = name.lower().split(".")[0]
-
- dev["iface"] = ip["assigned_object"]["name"]
- dev["dns_name"] = ip["dns_name"] if "dns_name" in ip else "None"
- dev["services"] = get_device_services(ip["assigned_object"]["device"]["id"])
-
- # fix multihomed devices in same IP range
- # FIXME: Does not handle > 2 connections properly
- if devname in devs:
- devs["%s-1" % devname] = devs.pop(devname)
- devs["%s-2" % devname] = dev
- else:
- devs[devname] = dev
-
- return devs
-
-
-def get_parent_prefix(child_prefix):
- # returns a parent prefix given a child prefix
- # FIXME: only returns the first found prefix, so doesn't handle more than 2 layers of hierarchy
-
- # get all devices in a prefix
- url = "%s%s" % (
- settings["api_endpoint"],
- "api/ipam/prefixes/?contains=%s" % child_prefix,
- )
-
- raw_prefixes = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- logger.debug(raw_prefixes)
-
- for prefix in raw_prefixes["results"]:
- if prefix["prefix"] != child_prefix:
- return prefix["prefix"]
-
- return None
-
-
-def get_prefix_data(prefix):
-
- # get all devices in a prefix
- url = "%s%s" % (settings["api_endpoint"], "api/ipam/prefixes/?prefix=%s" % prefix)
-
- raw_prefix = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- logger.debug("raw_prefix: %s", raw_prefix)
-
- return raw_prefix["results"][0]
-
-
-# main function that calls other functions
-if __name__ == "__main__":
-
- args = parse_nb_args()
-
- # only print log messages if debugging
- if args.debug:
- logger.setLevel(logging.DEBUG)
- else:
- logger.setLevel(logging.INFO)
-
- # load settings from yaml file
- settings = yaml.safe_load(args.settings.read())
-
- yaml_out = {}
-
- # load default config
- with open(
- os.path.join(
- os.path.dirname(os.path.realpath(__file__)), "base_edgeconfig.yaml"
- )
- ) as defconfig:
- yaml_out = yaml.safe_load(defconfig)
-
- logger.debug("settings: %s" % settings)
-
- # global, so this isn't run multiple times
- headers = [
- ("Authorization", "Token %s" % settings["token"]),
- ]
-
- # create structure from extracted data
- dns_zones = {}
- dns_rev_zones = {}
- dhcpd_subnets = []
- dhcpd_interfaces = []
- devs_per_prefix = {}
- prefixes = {}
- parent_prefixes = {}
-
- for prefix in settings["ip_prefixes"]:
-
- prefix_data = get_prefix_data(prefix)
-
- parent_prefix = get_parent_prefix(prefix)
- prefix_data["parent"] = parent_prefix
-
- pdevs = {}
- if parent_prefix:
- if parent_prefix in parent_prefixes:
- pdevs = devs_per_prefix[parent_prefix]
- else:
- pdevs = get_prefix_devices(parent_prefix)
- devs_per_prefix[parent_prefix] = pdevs
-
- prefix_data["parent_devs"] = pdevs
-
- prefixes[prefix] = prefix_data
-
- prefix_domain_extension = prefix_data["description"]
-
- devs = get_prefix_devices(prefix)
-
- devs_per_prefix[prefix] = devs
-
- dns_zones[prefix_domain_extension] = create_dns_zone(
- prefix_domain_extension, devs, pdevs
- )
-
- dns_zones[prefix_domain_extension]["ip_range"] = prefix
-
- dhcpd_subnets.append(
- create_dhcp_subnet(prefix, prefix_domain_extension, devs, pdevs)
- )
-
- dhcpd_if = find_dhcpd_interface(prefix, devs)
-
- if dhcpd_if and dhcpd_if not in dhcpd_interfaces:
- dhcpd_interfaces.append(dhcpd_if)
-
- yaml_out.update(
- {
- "dns_zones": dns_zones,
- "dns_rev_zones": dns_rev_zones,
- "dhcpd_subnets": dhcpd_subnets,
- "dhcpd_interfaces": dhcpd_interfaces,
- # the below are useful when debugging
- # "devs_per_prefix": devs_per_prefix,
- # "prefixes": prefixes,
- }
- )
-
- print(yaml.safe_dump(yaml_out, indent=2))
diff --git a/scripts/netbox_pxe.py b/scripts/netbox_pxe.py
deleted file mode 100644
index 427395a..0000000
--- a/scripts/netbox_pxe.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env python3
-
-# SPDX-FileCopyrightText: © 2020 Open Networking Foundation <support@opennetworking.org>
-# SPDX-License-Identifier: Apache-2.0
-
-from __future__ import absolute_import
-
-import argparse
-import json
-import logging
-import netaddr
-import re
-import ssl
-import urllib.parse
-import urllib.request
-from ruamel import yaml
-
-# create shared logger
-logging.basicConfig()
-logger = logging.getLogger("nbht")
-
-# headers to pass, set globally
-headers = []
-
-# settings
-settings = {}
-
-def parse_nb_args():
- """
- parse CLI arguments
- """
-
- parser = argparse.ArgumentParser(description="NetBox Host Descriptions")
-
- # Positional args
- parser.add_argument(
- "settings",
- type=argparse.FileType("r"),
- help="YAML ansible inventory file w/netbox info",
- )
-
- parser.add_argument(
- "--debug", action="store_true", help="Print additional debugging information"
- )
-
- return parser.parse_args()
-
-
-def json_api_get(
- url,
- headers,
- data=None,
- trim_prefix=False,
- allow_failure=False,
- validate_certs=False,
-):
- """
- Call JSON API endpoint, return data as a dict
- """
-
- logger.debug("json_api_get url: %s", url)
-
- # if data included, encode it as JSON
- if data:
- data_enc = str(json.dumps(data)).encode("utf-8")
-
- request = urllib.request.Request(url, data=data_enc, method="POST")
- request.add_header("Content-Type", "application/json; charset=UTF-8")
- else:
- request = urllib.request.Request(url)
-
- # add headers tuples
- for header in headers:
- request.add_header(*header)
-
- try:
-
- if validate_certs:
- response = urllib.request.urlopen(request)
-
- else:
- ctx = ssl.create_default_context()
- ctx.check_hostname = False
- ctx.verify_mode = ssl.CERT_NONE
-
- response = urllib.request.urlopen(request, context=ctx)
-
- except urllib.error.HTTPError:
- # asking for data that doesn't exist results in a 404, just return nothing
- if allow_failure:
- return None
- logger.exception("Server encountered an HTTPError at URL: '%s'", url)
- except urllib.error.URLError:
- logger.exception("An URLError occurred at URL: '%s'", url)
- else:
- # docs: https://docs.python.org/3/library/json.html
- jsondata = response.read()
- logger.debug("API response: %s", jsondata)
-
- try:
- data = json.loads(jsondata)
- except json.decoder.JSONDecodeError:
- # allow return of no data
- if allow_failure:
- return None
- logger.exception("Unable to decode JSON")
- else:
- logger.debug("JSON decoded: %s", data)
-
- return data
-
-
-def get_pxe_devices(tenant_group, filters=""):
-
- # get all devices in a prefix
- url = "%s%s" % (
- settings["api_endpoint"],
- "api/dcim/devices/?tenant_group=%s%s" % (tenant_group, filters),
- )
-
- print(url)
-
- raw_devs = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- logger.debug("raw_devs: %s", raw_devs)
-
- devs = []
-
- for item in raw_devs["results"]:
- dev = {}
- dev["serial"] = item["serial"]
- dev["hostname"] = item["name"]
- dev["domain"] = "aetherproject.net"
-
- devs.append(dev)
-
- return devs
-
-
-# main function that calls other functions
-if __name__ == "__main__":
-
- args = parse_nb_args()
-
- # only print log messages if debugging
- if args.debug:
- logger.setLevel(logging.DEBUG)
- else:
- logger.setLevel(logging.INFO)
-
- # load settings from yaml file
- settings = yaml.safe_load(args.settings.read())
-
- logger.info("settings: %s" % settings)
-
- # global, so this isn't run multiple times
- headers = [
- ("Authorization", "Token %s" % settings["token"]),
- ]
-
- # create structure from extracted data
-
- pxe_devices = get_pxe_devices("aether", "&role_id=1")
-
- print(yaml.safe_dump({"pxeboot_hosts": pxe_devices}, indent=2))
diff --git a/scripts/pxeconfig.py b/scripts/pxeconfig.py
new file mode 100644
index 0000000..34e45a0
--- /dev/null
+++ b/scripts/pxeconfig.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+
+# SPDX-FileCopyrightText: © 2021 Open Networking Foundation <support@opennetworking.org>
+# SPDX-License-Identifier: Apache-2.0
+
+# pxeconfig.py
+# Given a yaml config file (same as ansible inventory for a site), create a
+# YAML file consumable by ansible that has input for the pxeboot role, for creating
+# preseed files for servers
+
+from __future__ import absolute_import
+
+import nbhelper
+import json
+import pprint
+
+from ruamel import yaml
+
+# main function that calls other functions
+if __name__ == "__main__":
+
+ # this is passed to argparse, key is option name, rest is kwargs
+ extra_args = {
+ "domain_extension": {
+ "default": "aetherproject.net",
+ "nargs": "?",
+ "type": ascii,
+ "help": "Domain extension (optional, default: aetherproject.net)",
+ },
+ }
+
+ args = nbhelper.parse_cli_args(extra_args)
+ nbh = nbhelper.NBHelper(args)
+
+ yaml_out = {}
+ pxeboot_hosts = []
+
+ prefixes = nbh.all_prefixes()
+ devices = nbhelper.NBDevice.all_devs()
+
+ for dev_id, device in devices.items():
+
+ # only pxeboot for servers
+ if device.data["device_role"]["slug"] == "server":
+
+ pxe_dev = {}
+ pxe_dev["serial"] = device.data["serial"]
+ pxe_dev["hostname"] = device.data["name"]
+ pxe_dev["domain"] = args.domain_extension
+ pxe_dev["mac_address"] = device.primary_iface()["mac_address"].lower()
+
+ pxeboot_hosts.append(pxe_dev)
+
+ # yaml_out["devices"] = devices
+ yaml_out["pxeboot_hosts"] = pxeboot_hosts
+
+ print(yaml.safe_dump(yaml_out, indent=2))