Updates to scripts after refactor
- Run black to reformat all the scripts
- Update makefile test targets, pylint, and fix some of the issues found
- Update pxeconfig script for refactored nbhelper
- Add start of inventory script
Change-Id: I5f426ac2da840dc72f07f8a6844e199e47d49135
diff --git a/Makefile b/Makefile
index 8593cc2..08886e3 100644
--- a/Makefile
+++ b/Makefile
@@ -12,13 +12,12 @@
# ansible files is all top-level playbooks
ANSIBLE_PLAYBOOKS ?= $(wildcard *.yml)
-ANSIBLE_ROLES ?= $(wildcard roles/*)
# YAML files, excluding venv and cookiecutter directories
YAML_FILES ?= $(shell find . -type d \( -path "./venv_onfansible" -o -path "./cookiecutters" -o -path "./ansible_collections" -o -path "./roles" -o -path "./inventory/host_vars" \) -prune -o -type f \( -name '*.yaml' -o -name '*.yml' \) -print )
# all files with extensions
-PYTHON_FILES ?= $(wildcard scripts/*.py filter_plugins/*.py lint_rules/*.py cookiecutters/*/hooks/*.py roles/*/filter_plugins/*.py)
+PYTHON_FILES ?= $(wildcard scripts/*.py scripts/*/*.py filter_plugins/*.py lint_rules/*.py cookiecutters/*/hooks/*.py)
.DEFAULT_GOAL := help
.PHONY: test lint yamllint ansiblelint license help
@@ -43,7 +42,7 @@
reuse --root . lint
# Cookiecutter tests
-test: yamllint ansiblelint flake8 pylint black ## run all standard tests
+test: ansiblelint flake8 pylint black ## run all standard tests
yamllint: $(VENV_NAME) ## lint YAML format using yamllint
source ./$</bin/activate ; set -u ;\
@@ -55,17 +54,17 @@
ansiblelint: $(VENV_NAME) ## lint ansible-specific format using ansible-lint
source ./$</bin/activate ; set -u ;\
ansible-lint --version ;\
- ansible-lint -R -v $(ANSIBLE_PLAYBOOKS) $(ANSIBLE_ROLES)
+ ansible-lint -R -v $(ANSIBLE_PLAYBOOKS)
flake8: $(VENV_NAME) ## check python formatting with flake8
source ./$</bin/activate ; set -u ;\
flake8 --version ;\
- flake8 --max-line-length 99 $(PYTHON_FILES)
+ flake8 --max-line-length 99 --per-file-ignores="__init__.py:F401" $(PYTHON_FILES)
pylint: $(VENV_NAME) ## pylint check for python 3 compliance
source ./$</bin/activate ; set -u ;\
pylint --version ;\
- pylint --py3k $(PYTHON_FILES)
+ pylint --rcfile=pylint.ini $(PYTHON_FILES)
black: $(VENV_NAME) ## run black on python files in check mode
source ./$</bin/activate ; set -u ;\
diff --git a/cookiecutters/role/hooks/post_gen_project.py b/cookiecutters/role/hooks/post_gen_project.py
index 49667ae..c915d1f 100644
--- a/cookiecutters/role/hooks/post_gen_project.py
+++ b/cookiecutters/role/hooks/post_gen_project.py
@@ -34,7 +34,7 @@
# script is rendered as a template, so this will be filled in with the
# cookiecutter dict, which is why noqa is needed.
-CONTEXT = {{cookiecutter | jsonify}} # noqa: F821, E227
+CONTEXT = {{cookiecutter | jsonify}} # noqa: F821, E227 pylint: disable=E0602
def delete_file(filepath):
@@ -59,7 +59,7 @@
outfile_tmpl = env.from_string(infile)
delete_file(outfile_tmpl.render({"cookiecutter": CONTEXT}))
- platforms = list(CONTEXT["platforms"].keys())
+ platforms = list(CONTEXT["platforms"].keys()) # pylint: disable=E1136
# Combine Ubuntu and Debian as they're the same ansible_os_family
if "Ubuntu" in platforms and "Debian" not in platforms:
@@ -98,7 +98,7 @@
# delete any files that don't start with the license identifier
for licfile in license_files:
- if not licfile.startswith(CONTEXT["license"]):
+ if not licfile.startswith(CONTEXT["license"]): # pylint: disable=E1136
os.remove(os.path.join(os.path.curdir, "LICENSES", licfile))
diff --git a/lint_rules/NoTags.py b/lint_rules/NoTags.py
index d74eab6..e4144a9 100644
--- a/lint_rules/NoTags.py
+++ b/lint_rules/NoTags.py
@@ -19,7 +19,7 @@
tags = ["idiom"]
severity = "HIGH"
- def matchtask(self, file, task):
+ def matchtask(self, file, task): # pylint: disable=W0613, R0201
# Task should not have tags
if "tags" in task:
diff --git a/playbooks/aethercompute-playbook.yml b/playbooks/aethercompute-playbook.yml
index ba97a0f..551db47 100644
--- a/playbooks/aethercompute-playbook.yml
+++ b/playbooks/aethercompute-playbook.yml
@@ -6,5 +6,6 @@
become: true
roles:
- netprep
+ - sriov
- users
- docker
diff --git a/playbooks/qa-playbook.yml b/playbooks/qa-playbook.yml
index d81b35f..d2fde4d 100644
--- a/playbooks/qa-playbook.yml
+++ b/playbooks/qa-playbook.yml
@@ -17,6 +17,7 @@
- "ethtool"
- "iperf"
- "ipvsadm"
+ - "python-scapy"
- "tcpdump"
- "tcpreplay"
- "traceroute"
diff --git a/pylint.ini b/pylint.ini
new file mode 100644
index 0000000..8e28ddf
--- /dev/null
+++ b/pylint.ini
@@ -0,0 +1,20 @@
+# pylint config (ini format)
+#
+# SPDX-FileCopyrightText: © 2021 Open Networking Foundation <support@opennetworking.org>
+# SPDX-License-Identifier: Apache-2.0
+
+[MESSAGES CONTROL]
+
+disable= invalid-name,
+ missing-class-docstring,
+ missing-function-docstring,
+ missing-module-docstring,
+ import-outside-toplevel,
+ fixme,
+ too-few-public-methods,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-branches,
+ too-many-statements,
+ too-many-instance-attributes,
+ global-statement
diff --git a/scripts/edgeconfig.py b/scripts/edgeconfig.py
index 7034f6c..8131335 100644
--- a/scripts/edgeconfig.py
+++ b/scripts/edgeconfig.py
@@ -11,9 +11,10 @@
from __future__ import absolute_import
import argparse
-import nbhelper
import os
+import nbhelper
+
from ruamel.yaml import YAML
# main function that calls other functions
diff --git a/scripts/inventory.py b/scripts/inventory.py
new file mode 100644
index 0000000..26edcdd
--- /dev/null
+++ b/scripts/inventory.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+
+# SPDX-FileCopyrightText: © 2021 Open Networking Foundation <support@opennetworking.org>
+# SPDX-License-Identifier: Apache-2.0
+
+# inventory.py
+# create an inventory file for a site, in YAML format
+# currently aether specific
+
+import nbhelper
+from ruamel import yaml
+
+if __name__ == "__main__":
+
+ extra_args = {
+ "--generic": {
+ "action": "store_true",
+ "help": "Use generic output, instead of Aether-specific",
+ },
+ }
+
+ args = nbhelper.initialize(extra_args)
+ tenant = nbhelper.Tenant()
+
+ routers = {}
+ servers = {}
+ switches = {}
+
+ for device in tenant.get_devices():
+
+ dev_name = device.data["name"]
+
+ dev_vars = {
+ "ansible_host": str(device.primary_ip),
+ }
+
+ if device.data.device_role.slug == "router":
+ routers[dev_name] = dev_vars
+
+ elif device.data.device_role.slug == "server":
+ servers[dev_name] = dev_vars
+
+ elif device.data.device_role.slug == "switch":
+ switches[dev_name] = dev_vars
+
+ if args.generic:
+ groups = {
+ "routers": {"hosts": routers},
+ "servers": {"hosts": servers},
+ "switches": {"hosts": switches},
+ }
+ else:
+ groups = {
+ "aethermgmt": {"hosts": routers},
+ "aethercompute": {"hosts": servers},
+ "aetherfabric": {"hosts": switches},
+ }
+
+ yaml_out = {"all": {"children": groups}}
+
+ print(yaml.safe_dump(yaml_out, indent=2))
diff --git a/scripts/nbhelper/__init__.py b/scripts/nbhelper/__init__.py
index 7755623..85acea5 100644
--- a/scripts/nbhelper/__init__.py
+++ b/scripts/nbhelper/__init__.py
@@ -1,3 +1,6 @@
+# SPDX-FileCopyrightText: © 2021 Open Networking Foundation <support@opennetworking.org>
+# SPDX-License-Identifier: Apache-2.0
+
from .utils import initialize
from .tenant import Tenant
from .device import Device, VirtualMachine
diff --git a/scripts/nbhelper/container.py b/scripts/nbhelper/container.py
index 397ee32..0e728a1 100644
--- a/scripts/nbhelper/container.py
+++ b/scripts/nbhelper/container.py
@@ -1,4 +1,5 @@
import netaddr
+from .utils import logger
class Singleton(type):
@@ -10,7 +11,7 @@
return cls._instances[cls]
-class Container(object):
+class Container:
def __init__(self):
self.instances = dict()
@@ -36,16 +37,19 @@
for instance in self.instances.values():
if "dns" in list(map(str, instance.services)):
return instance
+ return None
def getDHCPServer(self):
for instance in self.instances.values():
if "tftp" in list(map(str, instance.services)):
return instance
+ return None
def getNTPServer(self):
for instance in self.instances.values():
if "ntp" in list(map(str, instance.services)):
return instance
+ return None
def getRouters(self):
""" Get a list of Devices/VMs which type is Router """
@@ -72,6 +76,7 @@
prefix.subnet
):
return str(netaddr.IPNetwork(address).ip)
+ return None
class DeviceContainer(AssignedObjectContainer, metaclass=Singleton):
@@ -87,9 +92,6 @@
class PrefixContainer(Container, metaclass=Singleton):
# PrefixContainer holds all prefixes fetch from Netbox, prefix(str) as key
- def get(self, instance_id, name_segments=1):
- return super().get(instance_id)
-
def all(self):
return self.instances.values()
@@ -221,7 +223,8 @@
# "eno1": {
# "mgmtOnly": False,
# "macaddr": "ca:fe:ba:be:11:11",
- # "ipaddr": [IPAddress("10.32.4.129"), IPAddress("10.32.4.130")]
+ # "ipaddr": [IPAddress("10.32.4.129"),
+ # IPAddress("10.32.4.130")]
# }
# }
# "mgmtswitch1": ...
@@ -242,14 +245,24 @@
]
interfaceDict.setdefault("mgmtOnly", False)
- # Use interface["mac_address"] as the default value, but if the mac_address
- # is None, that means we are dealing with a virtual interfaces
- # so we can get the linked interface's mac_address instead
+        # Use interface["mac_address"] as the default value, but if the
+        # mac_address is None, that means we are dealing with a virtual
+        # interface, so we can get the linked interface's mac_address instead
- interfaceDict.setdefault(
- "mac_address", interface["mac_address"] or
- device.interfaces[interface["instance"].label]["mac_address"]
- )
+ try:
+ interfaceDict.setdefault(
+ "mac_address",
+ interface["mac_address"]
+ or device.interfaces[interface["instance"].label][
+ "mac_address"
+ ],
+ )
+ except KeyError:
+ logger.error(
+ "Problem with MAC address on interface %s",
+ interface,
+ )
+
interfaceDict.setdefault("ip_addresses", list())
interfaceDict["ip_addresses"].append(address)
diff --git a/scripts/nbhelper/device.py b/scripts/nbhelper/device.py
index 94b03df..32a2075 100644
--- a/scripts/nbhelper/device.py
+++ b/scripts/nbhelper/device.py
@@ -4,12 +4,11 @@
# SPDX-License-Identifier: Apache-2.0
# device.py
-#
+import sys
import netaddr
-from .utils import logger, clean_name_dns
-from .network import Prefix
+from .utils import logger
from .container import DeviceContainer, VirtualMachineContainer, PrefixContainer
@@ -40,7 +39,7 @@
objects = dict()
def __init__(self, data):
- from .utils import netboxapi, netbox_config
+ from .utils import netboxapi
self.data = data
self.nbapi = netboxapi
@@ -49,6 +48,7 @@
self.id = self.data.id
self.tenant = None
self.primary_ip = None
+ self.primary_iface = None
# In Netbox, we use FQDN as the Device name, but in the script,
# we use the first segment to be the name of device.
@@ -108,8 +108,12 @@
# ipam.ip_addresses doesn't have primary tag,
# the primary tag is only available is only in the Device.
# So we need to compare address to check which one is primary ip
- if address.address == self.primary_ip.address:
- interface["isPrimary"] = True
+ try:
+ if address.address == self.primary_ip.address:
+ interface["isPrimary"] = True
+ self.primary_iface = interface
+ except AttributeError:
+ logger.error("Error with primary address for device %s", self.fullname)
# mgmt_only = False is a hack for VirtualMachine type
if self.__class__ == VirtualMachine:
@@ -181,7 +185,7 @@
self.netplan_config["ethernets"].setdefault(intfName, {})
self.netplan_config["ethernets"][intfName].setdefault(
"addresses", []
- ).append(address)
+ ).extend(interface["addresses"])
# If the current selected device is a Server
elif isinstance(self, Device) and self.data.device_role.name == "Server":
@@ -233,7 +237,9 @@
for dest_addr in destination.split(","):
# If interface address is in destination subnet, we don't need this route
- if netaddr.IPNetwork(address).ip in netaddr.IPNetwork(dest_addr):
+ if netaddr.IPNetwork(address).ip in netaddr.IPNetwork(
+ dest_addr
+ ):
continue
new_route = {
@@ -298,7 +304,8 @@
}
)
- # Only management server needs to be configured the whitelist netrange of internal interface
+ # Only management server needs to be configured the whitelist netrange of
+ # internal interface
if self.data.device_role.name == "Router":
ret["interface_subnets"] = dict()
@@ -329,8 +336,13 @@
if prefix.subnet not in ret["interface_subnets"][intfName]:
ret["interface_subnets"][intfName].append(prefix.subnet)
for neighbor in prefix.neighbor:
- if neighbor.subnet not in ret["interface_subnets"][intfName]:
- ret["interface_subnets"][intfName].append(neighbor.subnet)
+ if (
+ neighbor.subnet
+ not in ret["interface_subnets"][intfName]
+ ):
+ ret["interface_subnets"][intfName].append(
+ neighbor.subnet
+ )
for prefix in PrefixContainer().all():
@@ -361,8 +373,6 @@
if self.extra_config:
return self.extra_config
- primary_ip = self.data.primary_ip.address if self.data.primary_ip else None
-
service_names = list(map(lambda x: x.name, self.services))
if "dns" in service_names:
diff --git a/scripts/nbhelper/network.py b/scripts/nbhelper/network.py
index 3a51219..9b5d824 100644
--- a/scripts/nbhelper/network.py
+++ b/scripts/nbhelper/network.py
@@ -10,7 +10,6 @@
from .utils import logger, check_name_dns
from .container import PrefixContainer
-from .container import DeviceContainer, VirtualMachineContainer
class Prefix:
@@ -71,7 +70,6 @@
if self not in prefix.neighbor:
prefix.neighbor.append(self)
-
def build_prefix(self):
"""
find ip information for items (devices/vms, reserved_ips, dhcp_range) in prefix
diff --git a/scripts/nbhelper/service.py b/scripts/nbhelper/service.py
index 1ad992c..fe78a9f 100644
--- a/scripts/nbhelper/service.py
+++ b/scripts/nbhelper/service.py
@@ -4,12 +4,9 @@
# SPDX-License-Identifier: Apache-2.0
# service.py
-#
-import re
import netaddr
-from .utils import logger, AttrDict
from .container import ServiceInfoContainer
@@ -66,7 +63,7 @@
for host in domain["hosts"]:
if ip_address == host["ip_addr"]:
return f"{host['name']}.{domainName}."
-
+ return ""
dnsForwardZoneConfigs = dict()
@@ -91,7 +88,9 @@
if ntpServer:
forwardZoneConfig["cname"]["ntp"] = getDomainNameByIP(ntpServer["address"])
if dhcpServer:
- forwardZoneConfig["cname"]["tftp"] = getDomainNameByIP(dhcpServer["address"])
+ forwardZoneConfig["cname"]["tftp"] = getDomainNameByIP(
+ dhcpServer["address"]
+ )
if dnsServer:
forwardZoneConfig["cname"]["ns"] = getDomainNameByIP(dnsServer["address"])
forwardZoneConfig["ns"].append(getDomainNameByIP(dnsServer["address"]))
@@ -126,7 +125,7 @@
if o1 == 10:
o2 = o3 = o4 = 0
cidr_plen = 8
- elif (o1 == 172 and o2 >= 16 and o2 <= 31) or (o1 == 192 and o2 == 168):
+ elif (o1 == 172 and 16 <= o2 <= 31) or (o1 == 192 and o2 == 168):
o3 = o4 = 0
cidr_plen = 16
diff --git a/scripts/nbhelper/tenant.py b/scripts/nbhelper/tenant.py
index 4d529a7..a4d4048 100644
--- a/scripts/nbhelper/tenant.py
+++ b/scripts/nbhelper/tenant.py
@@ -6,7 +6,9 @@
# tenant.py
# The tenant abstract object of Netbox Object - Tenant
-from .utils import logger, netboxapi
+import sys
+
+from .utils import logger
from .device import Device, VirtualMachine
from .network import Prefix
@@ -58,9 +60,9 @@
"""
for machine in self.devices + self.vms:
- if name and machine.name == name:
- return machine
- elif machine.data["device_role"]["name"] == "Router":
+ if (name and machine.name == name) or machine.data["device_role"][
+ "name"
+ ] == "Router":
return machine
ret_msg = (
@@ -71,10 +73,12 @@
logger.error(ret_msg, name)
sys.exit(1)
- def get_devices(self, device_types=["server", "router"]):
+ def get_devices(self, device_types=None):
"""
Get all devices (Router + Server) belong to this Tenant
"""
+        if device_types is None:
+            device_types = ["server", "router"]
if not device_types:
return self.devices + self.vms
diff --git a/scripts/nbhelper/utils.py b/scripts/nbhelper/utils.py
index 5614ba9..d44bc96 100644
--- a/scripts/nbhelper/utils.py
+++ b/scripts/nbhelper/utils.py
@@ -6,11 +6,13 @@
# utils.py
# The utility functions shared among nbhelper objects
-import re
-import logging
import argparse
-import pynetbox
+import logging
+import re
+import sys
+
import requests
+import pynetbox
from ruamel import yaml
@@ -36,7 +38,7 @@
for require_args in ["api_endpoint", "token", "tenant_name"]:
if not netbox_config.get(require_args):
- logger.error("The require argument: %s was not set. Stop." % require_args)
+ logger.error("The require argument: %s was not set. Stop.", require_args)
sys.exit(1)
netboxapi = pynetbox.api(
@@ -53,7 +55,7 @@
return args
-def parse_cli_args(extra_args={}):
+def parse_cli_args(extra_args):
"""
parse CLI arguments. Can add extra arguments with a option:kwargs dict
"""
@@ -115,5 +117,5 @@
class AttrDict(dict):
def __init__(self, *args, **kwargs):
- super(AttrDict, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.__dict__ = self
diff --git a/scripts/netbox_hosts.py b/scripts/netbox_hosts.py
deleted file mode 100644
index e968e8e..0000000
--- a/scripts/netbox_hosts.py
+++ /dev/null
@@ -1,432 +0,0 @@
-#!/usr/bin/env python3
-
-# SPDX-FileCopyrightText: © 2020 Open Networking Foundation <support@opennetworking.org>
-# SPDX-License-Identifier: Apache-2.0
-
-# TODO:
-# Fix issues where IPMI given primary IP for a node
-
-from __future__ import absolute_import
-
-import argparse
-import json
-import logging
-import netaddr
-import re
-import ssl
-import urllib.parse
-import urllib.request
-from ruamel import yaml
-
-# create shared logger
-logging.basicConfig()
-logger = logging.getLogger("nbht")
-
-# global dict of jsonpath expressions -> compiled jsonpath parsers, as
-# reparsing expressions in each loop results in 100x longer execution time
-jpathexpr = {}
-
-# headers to pass, set globally
-headers = []
-
-# settings
-settings = {}
-
-# cached data from API
-devices = {}
-interfaces = {}
-
-
-def parse_nb_args():
- """
- parse CLI arguments
- """
-
- parser = argparse.ArgumentParser(description="NetBox Host Descriptions")
-
- # Positional args
- parser.add_argument(
- "settings",
- type=argparse.FileType("r"),
- help="YAML ansible inventory file w/netbox info",
- )
-
- parser.add_argument(
- "--debug", action="store_true", help="Print additional debugging information"
- )
-
- return parser.parse_args()
-
-
-def json_api_get(
- url,
- headers,
- data=None,
- trim_prefix=False,
- allow_failure=False,
- validate_certs=False,
-):
- """
- Call JSON API endpoint, return data as a dict
- """
-
- logger.debug("json_api_get url: %s", url)
-
- # if data included, encode it as JSON
- if data:
- data_enc = str(json.dumps(data)).encode("utf-8")
-
- request = urllib.request.Request(url, data=data_enc, method="POST")
- request.add_header("Content-Type", "application/json; charset=UTF-8")
- else:
- request = urllib.request.Request(url)
-
- # add headers tuples
- for header in headers:
- request.add_header(*header)
-
- try:
-
- if validate_certs:
- response = urllib.request.urlopen(request)
-
- else:
- ctx = ssl.create_default_context()
- ctx.check_hostname = False
- ctx.verify_mode = ssl.CERT_NONE
-
- response = urllib.request.urlopen(request, context=ctx)
-
- except urllib.error.HTTPError:
- # asking for data that doesn't exist results in a 404, just return nothing
- if allow_failure:
- return None
- logger.exception("Server encountered an HTTPError at URL: '%s'", url)
- except urllib.error.URLError:
- logger.exception("An URLError occurred at URL: '%s'", url)
- else:
- # docs: https://docs.python.org/3/library/json.html
- jsondata = response.read()
- logger.debug("API response: %s", jsondata)
-
- try:
- data = json.loads(jsondata)
- except json.decoder.JSONDecodeError:
- # allow return of no data
- if allow_failure:
- return None
- logger.exception("Unable to decode JSON")
- else:
- logger.debug("JSON decoded: %s", data)
-
- return data
-
-
-def create_dns_zone(extension, devs):
- # Checks for dns entries
-
- a_recs = {} # PTR records created by inverting this
- cname_recs = {}
- srv_recs = {}
- ns_recs = []
- txt_recs = {}
-
- # scan through devs and look for dns_name, if not, make from name and
- # extension
- for name, value in devs.items():
-
- # add DNS entries for every DHCP host if there's a DHCP range
- # DHCP addresses are of the form dhcp###.extension
- if name == "prefix_dhcp":
- for ip in netaddr.IPNetwork(value["dhcp_range"]).iter_hosts():
- a_recs["dhcp%03d" % (ip.words[3])] = str(ip)
-
- continue
-
- # require DNS names to only use ASCII characters
- # (alphanumeric, lowercase, with dash/period)
- # _'s are used in SRV/TXT records, but in general use aren't recommended
- dns_name = re.sub("[^a-z0-9.-]", "-", name.lower(), 0, re.ASCII)
-
- # Add as an A record (and inverse, PTR record), only if it's a new name
- if dns_name not in a_recs:
- a_recs[dns_name] = value["ip4"]
- else:
- # most likely a data entry error
- logger.warning(
- "Duplicate DNS name '%s' for devices at IP: '%s' and '%s', ignoring",
- dns_name,
- a_recs[dns_name],
- value["ip4"],
- )
- continue
-
- # if a DNS name is given as a part of the IP address, it's viewed as a CNAME
- if value["dns_name"]:
-
- if re.search("%s$" % extension, value["dns_name"]):
-
- # strip off the extension, and add as a CNAME
- dns_cname = value["dns_name"].split(".%s" % extension)[0]
-
- elif "." in value["dns_name"]:
- logger.warning(
- "Device '%s' has a IP assigned DNS name '%s' outside "
- + "the prefix extension: '%s', ignoring",
- name,
- value["dns_name"],
- extension,
- )
- continue
-
- else:
- dns_cname = value["dns_name"]
-
- if dns_cname == dns_name:
- logger.warning(
- "DNS Name field '%s' is identical to device name '%s', ignoring",
- value["dns_name"],
- dns_name,
- )
- else:
- cname_recs[dns_cname] = "%s.%s." % (dns_name, extension)
-
- # Add services as cnames, and possibly ns records
- for svc in value["services"]:
-
- # only add service if it uses the IP of the host
- if value["ip4"] in svc["ip4s"]:
- cname_recs[svc["name"]] = "%s.%s." % (dns_name, extension)
-
- if svc["port"] == 53 and svc["protocol"] == "udp":
- ns_recs.append("%s.%s." % (dns_name, extension))
-
- return {
- "a": a_recs,
- "cname": cname_recs,
- "ns": ns_recs,
- "srv": srv_recs,
- "txt": txt_recs,
- }
-
-
-def create_dhcp_subnet(devs):
- # makes DHCP subnet information
-
- hosts = {}
-
- for name, value in devs.items():
-
- # has a MAC address, and it's not null
- if "macaddr" in value and value["macaddr"]:
-
- hosts[value["ip4"]] = {
- "name": name,
- "macaddr": value["macaddr"],
- }
-
- return hosts
-
-
-def get_device_services(device_id, filters=""):
-
- # get services info
- url = "%s%s" % (
- settings["api_endpoint"],
- "api/ipam/services/?device_id=%s%s" % (device_id, filters),
- )
-
- raw_svcs = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- services = []
-
- for rsvc in raw_svcs["results"]:
-
- svc = {}
-
- svc["name"] = rsvc["name"]
- svc["description"] = rsvc["description"]
- svc["port"] = rsvc["port"]
- svc["protocol"] = rsvc["protocol"]["value"]
- svc["ip4s"] = []
-
- for ip in rsvc["ipaddresses"]:
- svc["ip4s"].append(str(netaddr.IPNetwork(ip["address"]).ip))
-
- services.append(svc)
-
- return services
-
-
-def get_interface_mac_addr(interface_id):
- # return a mac addres, or None if undefined
-
- # get the interface info
- url = "%s%s" % (settings["api_endpoint"], "api/dcim/interfaces/%s/" % interface_id)
-
- iface = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- if iface["mac_address"]:
- return iface["mac_address"]
-
- return None
-
-
-def get_device_interfaces(device_id, filters=""):
-
- url = "%s%s" % (
- settings["api_endpoint"],
- "api/dcim/interfaces/?device_id=%s%s" % (device_id, filters),
- )
-
- logger.debug("raw_ifaces_url: %s", url)
-
- raw_ifaces = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- logger.debug("raw_ifaces: %s", raw_ifaces)
-
- ifaces = []
-
- for raw_iface in raw_ifaces["results"]:
-
- iface = {}
-
- iface["name"] = raw_iface["name"]
- iface["macaddr"] = raw_iface["mac_address"]
- iface["mgmt_only"] = raw_iface["mgmt_only"]
- iface["description"] = raw_iface["description"]
-
- if raw_iface["count_ipaddresses"]:
- url = "%s%s" % (
- settings["api_endpoint"],
- "api/ipam/ip-addresses/?interface_id=%s" % raw_iface["id"],
- )
-
- raw_ip = json_api_get(
- url, headers, validate_certs=settings["validate_certs"]
- )
-
- iface["ip4"] = str(netaddr.IPNetwork(raw_ip["results"][0]["address"]).ip)
-
- ifaces.append(iface)
-
- return ifaces
-
-
-def get_prefix_devices(prefix, filters=""):
-
- # get all devices in a prefix
- url = "%s%s" % (
- settings["api_endpoint"],
- "api/ipam/ip-addresses/?parent=%s%s" % (prefix, filters),
- )
-
- raw_ips = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- logger.debug("raw_ips: %s", raw_ips)
-
- devs = {}
-
- for ip in raw_ips["results"]:
-
- logger.info("ip: %s", ip)
-
- # if it's a DHCP range, add that range to the dev list as prefix_dhcp
- if ip["status"]["value"] == "dhcp":
- devs["prefix_dhcp"] = {"dhcp_range": ip["address"]}
- continue
-
- dev = {}
-
- dev["ip4"] = str(netaddr.IPNetwork(ip["address"]).ip)
- dev["macaddr"] = get_interface_mac_addr(ip["assigned_object"]["id"])
-
- ifaces = get_device_interfaces(
- ip["assigned_object"]["device"]["id"], "&mgmt_only=true"
- )
-
- if ifaces and dev["ip4"] == ifaces[0]["ip4"]: # this is a mgmt IP
- devname = "%s-%s" % (
- ip["assigned_object"]["device"]["name"],
- ifaces[0]["name"],
- )
- dev["dns_name"] = ""
- dev["services"] = []
-
- else: # this is a primary IP
-
- devname = ip["assigned_object"]["device"]["name"]
- dev["dns_name"] = ip["dns_name"] if "dns_name" in ip else "None"
- dev["services"] = get_device_services(ip["assigned_object"]["device"]["id"])
-
- devs[devname] = dev
-
- return devs
-
-
-def get_prefix_data(prefix):
-
- # get all devices in a prefix
- url = "%s%s" % (settings["api_endpoint"], "api/ipam/prefixes/?prefix=%s" % prefix)
-
- raw_prefix = json_api_get(url, headers, validate_certs=settings["validate_certs"])
-
- logger.debug("raw_prefix: %s", raw_prefix)
-
- return raw_prefix["results"][0]
-
-
-# main function that calls other functions
-if __name__ == "__main__":
-
- args = parse_nb_args()
-
- # only print log messages if debugging
- if args.debug:
- logger.setLevel(logging.DEBUG)
- else:
- logger.setLevel(logging.INFO)
-
- # load settings from yaml file
- settings = yaml.safe_load(args.settings.read())
-
- logger.info("settings: %s" % settings)
-
- # global, so this isn't run multiple times
- headers = [
- ("Authorization", "Token %s" % settings["token"]),
- ]
-
- # create structure from extracted data
-
- dns_global = {}
- dns_zones = {}
- dhcp_global = {}
- dhcp_subnets = {}
-
- for prefix in settings["dns_prefixes"]:
-
- prefix_data = get_prefix_data(prefix)
-
- prefix_domain_extension = prefix_data["description"]
-
- devs = get_prefix_devices(prefix)
-
- dns_zones[prefix_domain_extension] = create_dns_zone(
- prefix_domain_extension, devs
- )
-
- dns_zones[prefix_domain_extension]["ip_range"] = prefix
-
- dhcp_subnets[prefix] = create_dhcp_subnet(devs)
-
- yaml_out = {
- "dns_global": dns_global,
- "dns_zones": dns_zones,
- "dhcp_global": dhcp_global,
- "dhcp_subnets": dhcp_subnets,
- "devs": devs,
- "prefix_data": prefix_data,
- }
-
- print(yaml.safe_dump(yaml_out, indent=2))
diff --git a/scripts/pxeconfig.py b/scripts/pxeconfig.py
index 97aedab..fe9e1a7 100644
--- a/scripts/pxeconfig.py
+++ b/scripts/pxeconfig.py
@@ -11,7 +11,6 @@
from __future__ import absolute_import
import nbhelper
-
from ruamel import yaml
# main function that calls other functions
@@ -28,24 +27,25 @@
}
args = nbhelper.initialize(extra_args)
- tenant = nbhelper.NBTenant()
+ tenant = nbhelper.Tenant()
yaml_out = {}
pxeboot_hosts = []
- prefixes = nbhelper.NBPrefix.all_prefixes()
- devices = nbhelper.NBDevice.all_objects()
-
- for dev_id, device in devices.items():
+ for device in tenant.get_devices():
# only pxeboot for servers
- if device.data["device_role"]["slug"] == "server":
+ if device.data.device_role.slug in ["server", "router"]:
pxe_dev = {}
- pxe_dev["serial"] = device.data["serial"]
pxe_dev["hostname"] = device.data["name"]
pxe_dev["domain"] = args.domain_extension
- pxe_dev["mac_address"] = device.primary_iface()["mac_address"].lower()
+
+ if device.data["serial"]:
+ pxe_dev["serial"] = device.data["serial"]
+
+ if device.primary_iface["mac_address"]:
+ pxe_dev["mac_address"] = device.primary_iface["mac_address"].lower()
pxeboot_hosts.append(pxe_dev)
diff --git a/scripts/tenant_validator.py b/scripts/tenant_validator.py
index 0ce504e..56c6ed7 100644
--- a/scripts/tenant_validator.py
+++ b/scripts/tenant_validator.py
@@ -8,15 +8,15 @@
from __future__ import absolute_import
+import argparse
+import logging
import re
import sys
-import yaml
-import logging
-import argparse
+
+import netaddr
import pynetbox
import requests
-import netaddr
-
+import yaml
logging.basicConfig()
logger = logging.getLogger("TenentValidator")
@@ -37,7 +37,7 @@
)
-class Configuration(object):
+class Configuration:
mapping_dict = {
logging.ERROR: logger.error,
@@ -67,7 +67,7 @@
return ("/".join(obj.url.split("/")[-4:-2]),)
-def validate_vlans(vlans=list()):
+def validate_vlans(vlans=None):
global misconfs
for vlan in vlans:
@@ -79,7 +79,7 @@
misconfs.append(Configuration(vlan, "VLAN tenant isn't set"))
-def validate_prefixes(prefixes=list()):
+def validate_prefixes(prefixes=None):
global misconfs
tenant_dict = dict()
@@ -154,7 +154,7 @@
dhcp_range = dhcp_addr[0].address
ip_addrs = netboxapi.ipam.ip_addresses.filter(parent=dhcp_range)
- ip_addrs = list(filter(lambda ip: ip != dhcp_addr[0], ip_addrs))
+        ip_addrs = [ip for ip in ip_addrs if ip != dhcp_addr[0]]
if ip_addrs:
misconfs.append(
Configuration(
@@ -165,7 +165,7 @@
)
-def validate_ip_addresses(ip_addresses=list()):
+def validate_ip_addresses(ip_addresses=None):
global misconfs
prefix_dict = dict()
@@ -187,7 +187,7 @@
)
-def validate_interfaces(interfaces=list()):
+def validate_interfaces(interfaces=None):
global misconfs
for interface in interfaces:
@@ -233,7 +233,7 @@
)
-def validate_vrfs(vrfs=list()):
+def validate_vrfs(vrfs=None):
global misconfs
for vrf in vrfs:
@@ -246,7 +246,7 @@
misconfs.append(Configuration(vrf, "VRF doesn't have tenant set"))
-def validate_machines(machines=list()):
+def validate_machines(machines=None):
global misconfs
tenant_info = dict()
@@ -298,7 +298,7 @@
)
-def validate_tenants(tenants=list()):
+def validate_tenants(tenants=None):
global misconfs
for tenant in tenants:
@@ -322,7 +322,7 @@
tenants = list(netboxapi.tenancy.tenants.filter(name=tenant_name))
if len(tenants) == 0:
- logger.critical("Tenant name %s wasn't found in Netbox" % tenant_name)
+ logger.critical("Tenant name %s wasn't found in Netbox", tenant_name)
sys.exit(1)
tenant_id = None if len(tenants) != 1 else tenants[0].id