[VOL-5026] - Build and (pre-) deploy repo:voltha-go

Makefile
--------
  o Refactor test and mod-* targets per voltha-openolt-adapter/Makefile.
  o Logic moved into makefiles/analysis/{coverage,sca}.mk.
  o Inline (set -euo pipefail && cmd | tee log) in test-coverage.
  o Improve error handling: a bare "cmd | tee" silently masks the shell
    exit status (sketch below).
  o make test fails locally (docker image: CPU profile access denied).
  o Local problem; send a job through Jenkins for accurate status.
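
  For reference, the error-handling intent as a minimal bash sketch
  (illustrative only, not the literal recipe; the real target lives in
  makefiles/analysis/coverage.mk and the log file name here is made up):

      #!/usr/bin/env bash
      # Without pipefail the pipeline exits with tee's status, so a
      # failing "go test" is silently masked.
      set -euo pipefail
      go test ./... 2>&1 | tee results.out   # now fails when "go test" fails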

makefiles/
----------
  o Copy in library makefiles.
  o Especially docker/include.mk.

compose/*/*.yaml
----------------
  o Update copyright notice

db/*/*.go
rw_core/*/*.go
--------------
  o Run gofmt -s -w on the source to fix latent linting problems that
    fail jobs (see the command below).
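
  The formatting pass can be reproduced locally with something like the
  following (directories taken from the file list above; gofmt recurses):

      gofmt -s -w ./db ./rw_core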

Change-Id: If7cd349822edd0e604ac4daf27d315f528c6bcf6
diff --git a/Makefile b/Makefile
index 4141898..88cc745 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # -*- makefile -*-
 # -----------------------------------------------------------------------#
 # Copyright 2016-2023 Open Networking Foundation (ONF) and the ONF Contributors
-
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -15,6 +15,8 @@
 # limitations under the License.
 # -----------------------------------------------------------------------
 
+$(if $(DEBUG),$(warning ENTER))
+
 .DEFAULT_GOAL := help
 
 $(if $(VERBOSE),$(eval export VERBOSE=$(VERBOSE))) # visible to include(s)
@@ -63,15 +65,6 @@
 DOCKER_BUILD_ARGS_LOCAL ?= ${DOCKER_BUILD_ARGS} \
 	--build-arg LOCAL_PROTOS=${LOCAL_PROTOS}
 
-# tool containers
-VOLTHA_TOOLS_VERSION ?= 2.4.0
-
-GO                = docker run --rm --user $$(id -u):$$(id -g) -v ${CURDIR}:/app $(shell test -t 0 && echo "-it") -v gocache:/.cache -v gocache-${VOLTHA_TOOLS_VERSION}:/go/pkg voltha/voltha-ci-tools:${VOLTHA_TOOLS_VERSION}-golang go
-GO_JUNIT_REPORT   = docker run --rm --user $$(id -u):$$(id -g) -v ${CURDIR}:/app -i voltha/voltha-ci-tools:${VOLTHA_TOOLS_VERSION}-go-junit-report go-junit-report
-GOCOVER_COBERTURA = docker run --rm --user $$(id -u):$$(id -g) -v ${CURDIR}:/app/src/github.com/opencord/voltha-go -i voltha/voltha-ci-tools:${VOLTHA_TOOLS_VERSION}-gocover-cobertura gocover-cobertura
-GOLANGCI_LINT     = docker run --rm --user $$(id -u):$$(id -g) -v ${CURDIR}:/app $(shell test -t 0 && echo "-it") -v gocache:/.cache -v gocache-${VOLTHA_TOOLS_VERSION}:/go/pkg voltha/voltha-ci-tools:${VOLTHA_TOOLS_VERSION}-golangci-lint golangci-lint
-HADOLINT          = docker run --rm --user $$(id -u):$$(id -g) -v ${CURDIR}:/app $(shell test -t 0 && echo "-it") voltha/voltha-ci-tools:${VOLTHA_TOOLS_VERSION}-hadolint hadolint
-
 .PHONY: docker-build local-protos local-lib-go help
 .DEFAULT_GOAL := help
 
@@ -79,7 +72,7 @@
 ## Local Development Helpers
 local-protos: ## Copies a local version of the voltha-protos dependency into the vendor directory
 ifdef LOCAL_PROTOS
-	rm -rf vendor/github.com/opencord/voltha-protos/v5/go
+	$(RM) -r vendor/github.com/opencord/voltha-protos/v5/go
 	mkdir -p vendor/github.com/opencord/voltha-protos/v5/go
 	cp -r ${LOCAL_PROTOS}/go/* vendor/github.com/opencord/voltha-protos/v5/go
 endif
@@ -87,7 +80,7 @@
 ## Local Development Helpers
 local-lib-go: ## Copies a local version of the voltha-lib-go dependency into the vendor directory
 ifdef LOCAL_LIB_GO
-	rm -rf vendor/github.com/opencord/voltha-lib-go/v7/pkg
+	$(RM) -r vendor/github.com/opencord/voltha-lib-go/v7/pkg
 	mkdir -p vendor/github.com/opencord/voltha-lib-go/v7/pkg
 	cp -r ${LOCAL_LIB_GO}/pkg/* vendor/github.com/opencord/voltha-lib-go/v7/pkg/
 endif
@@ -102,10 +95,9 @@
 docker-build-args := $(null)# comment line for debug mode
 
 docker-build: local-protos local-lib-go ## Build core docker image (set BUILD_PROFILED=true to also build the profiled image)
-	@echo "======================================================================="
-	@echo " ** TARGET: $@"
-	@echo "======================================================================="
-	$(MAKE) --no-print-directory init-test-results
+
+	$(call banner-enter,$@)
+	$(MAKE) --no-print-directory test-coverage-init
 
 	docker $(docker-build-args) build $(DOCKER_BUILD_ARGS) -t ${RWCORE_IMAGENAME}:${DOCKER_TAG} --target ${DOCKER_TARGET} -f docker/Dockerfile.rw_core .
 ifdef BUILD_PROFILED
@@ -117,11 +109,16 @@
 	docker build $(DOCKER_BUILD_ARGS) --target dev --build-arg GOLANG_IMAGE=golang:1.13.8-buster --build-arg CGO_PARAMETER="CGO_ENABLED=1" --build-arg DEPLOY_IMAGE=debian:buster-slim --build-arg EXTRA_GO_BUILD_TAGS="--race" -t ${RWCORE_IMAGENAME}:${DOCKER_TAG}-rd -f docker/Dockerfile.rw_core .
 endif
 
+	$(call banner-leave,$@)
+
 ## -----------------------------------------------------------------------
 ## Intent:
 ## -----------------------------------------------------------------------
 docker-push: ## Push the docker images to an external repository
-	docker push ${RWCORE_IMAGENAME}:${DOCKER_TAG}
+
+	$(call banner-enter,$@)
+
+	docker push ${RWCORE_IMAGENAME}:${DOCKER_TAG}
 ifdef BUILD_PROFILED
 	docker push ${RWCORE_IMAGENAME}:${DOCKER_TAG}-profile
 endif
@@ -132,7 +129,14 @@
 	@if [ "`kind get clusters | grep voltha-$(TYPE)`" = '' ]; then echo "no voltha-$(TYPE) cluster found" && exit 1; fi
 	kind load docker-image ${RWCORE_IMAGENAME}:${DOCKER_TAG} --name=voltha-$(TYPE) --nodes $(shell kubectl get nodes --template='{{range .items}}{{.metadata.name}},{{end}}' | rev | cut -c 2- | rev)
 
+	$(call banner-leave,$@)
+
+## -----------------------------------------------------------------------
 ## lint and unit tests
+## -----------------------------------------------------------------------
+
+# [TODO]
+#   o Merge lint-* targets with repo:openolt-adapter/Makefile
 
 lint-dockerfile: ## Perform static analysis on Dockerfile
 	@echo "Running Dockerfile lint check..."
@@ -146,10 +150,8 @@
 	@git status > /dev/null
 	@git diff-index --quiet HEAD -- go.mod go.sum vendor || (echo "ERROR: Staged or modified files must be committed before running this test" && git status -- go.mod go.sum vendor && exit 1)
 	@[[ `git ls-files --exclude-standard --others go.mod go.sum vendor` == "" ]] || (echo "ERROR: Untracked files must be cleaned up before running this test" && git status -- go.mod go.sum vendor && exit 1)
-	${GO} mod tidy
 
-        # This command is destructive, vendor/ removed
-	${GO} mod vendor
+	$(MAKE) mod-update
 
 	@git status > /dev/null
 	@git diff-index --quiet HEAD -- go.mod go.sum vendor || (echo "ERROR: Modified files detected after running go mod tidy / go mod vendor" && git status -- go.mod go.sum vendor && git checkout -- go.mod go.sum vendor && exit 1)
@@ -160,47 +162,63 @@
 ## -----------------------------------------------------------------------
 lint: lint-mod lint-dockerfile ## Run all lint targets
 
-sca: ## Runs static code analysis with the golangci-lint tool
-	@$(RM) -r ./sca-report
-	@mkdir -p ./sca-report
-	@echo "Running static code analysis..."
-	@${GOLANGCI_LINT} run --deadline=6m --out-format junit-xml ./... \
-	    | tee ./sca-report/sca-report.xml
-	@echo ""
-	@echo "Static code analysis OK"
+include $(MAKEDIR)/analysis/include.mk
 
 ## -----------------------------------------------------------------------
 ## -----------------------------------------------------------------------
 tests-dir      := ./tests/results
 tests-coverage := $(tests-dir)/go-test-coverage
 tests-results  := $(tests-dir)/go-test-results
-test: local-lib-go ## Run unit tests
-	$(MAKE) --no-print-directory init-test-results
-	$(HIDE)${GO} test -mod=vendor -v -coverprofile $(tests-coverage).out -covermode count ./... 2>&1 | tee $(tests-results).out ;\
-	RETURN=$$? ;\
-	${GO_JUNIT_REPORT} < $(tests-results).out > $(tests-results).xml ;\
-	${GOCOVER_COBERTURA} < $(tests-coverage).out > $(tests-coverage).xml ;\
-	exit $$RETURN
+
+test :: test-coverage ## Run unit tests
+test-coverage : local-lib-go
 
 clean :: distclean ## Removes any local filesystem artifacts generated by a build
 
 distclean sterile :: ## Removes any local filesystem artifacts generated by a build or test run
 	$(RM) -r ./sca-report
 
-mod-update: ## Update go mod files
-	${GO} mod tidy
-	${GO} mod vendor
-
 fmt: ## Formats the soure code to go best practice style
+#	gofmt -s -w $(PACKAGES)
 	@go fmt ${PACKAGES}
 
-## ---------------------------------------------------------------------------
-## Intent: Prep work, test -coverprofile fails w/o a file on disk ?!?
-## ---------------------------------------------------------------------------
-init-test-results:
-	@$(RM) -r tests/results
-	@mkdir -p tests/results
-	@touch $(tests-coverage).out
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+.PHONY: mod-update
+mod-update: mod-tidy mod-vendor
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+.PHONY: mod-tidy
+mod-tidy:
+	$(call banner-enter,$@)
+	${GO} mod tidy
+	$(call banner-leave,$@)
+
+## -----------------------------------------------------------------------
+## Intent: Refresh vendor/ directory package source
+## -----------------------------------------------------------------------
+##   Note: This target is destructive, vendor/ directory will be removed.
+##   Todo: Update logic to checkout version on demand VS checkin a static
+##         copy of vendor/ sources then augment.  Logically removal of
+##         files under revision control is strange.
+## -----------------------------------------------------------------------
+.PHONY: mod-vendor
+mod-vendor:
+	$(call banner-enter,$@)
+	@$(if $(LOCAL_FIX_PERMS),chmod 777 .)
+	${GO} mod vendor
+	@$(if $(LOCAL_FIX_PERMS),chmod 755 .)
+	$(call banner-leave,$@)
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+help ::
+	@echo '[MOD UPDATE]'
+	@echo '  mod-update'
+	@echo '    LOCAL_FIX_PERMS=1    Hack to fix docker filesystem access problems'
+	@echo '  mod-tidy'
+	@echo '  mod-vendor'
 
 ## ---------------------------------------------------------------------------
 # For each makefile target, add ## <description> on the target line and it will be listed by 'make help'
@@ -212,11 +230,12 @@
 	@echo "  versions    Display version-by-tool used while building"
   ifdef VERBOSE
 	@echo
-	@echo "  init-test-results    Massage tests/results to fix coverage reporting"
   endif
 	@echo
 	@grep --no-filename '^[[:alpha:]_-]*:.* ##' $(MAKEFILE_LIST) \
 		| sort \
 		| awk 'BEGIN {FS=":.* ## "}; {printf "%-25s : %s\n", $$1, $$2};'
 
+$(if $(DEBUG),$(warning LEAVE))
+
 # [EOF]
diff --git a/compose/adapters-ponsim.yml b/compose/adapters-ponsim.yml
index 86a30d2..9a5a5c6 100644
--- a/compose/adapters-ponsim.yml
+++ b/compose/adapters-ponsim.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/compose/adapters-simulated.yml b/compose/adapters-simulated.yml
index 4d2a3b7..54e21cf 100644
--- a/compose/adapters-simulated.yml
+++ b/compose/adapters-simulated.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/compose/docker-compose-etcd.yml b/compose/docker-compose-etcd.yml
index 9c11c3c..04cb899 100644
--- a/compose/docker-compose-etcd.yml
+++ b/compose/docker-compose-etcd.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/compose/docker-compose-zk-kafka-test.yml b/compose/docker-compose-zk-kafka-test.yml
index dac9216..1ca1a5b 100644
--- a/compose/docker-compose-zk-kafka-test.yml
+++ b/compose/docker-compose-zk-kafka-test.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/compose/ofagent.yml b/compose/ofagent.yml
index a54b610..af4f26a 100644
--- a/compose/ofagent.yml
+++ b/compose/ofagent.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2019 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/compose/openolt.yml b/compose/openolt.yml
index 915dbca..cda7d74 100644
--- a/compose/openolt.yml
+++ b/compose/openolt.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/compose/ponsim_olt.yml b/compose/ponsim_olt.yml
index 0ff5ca7..b88c185 100644
--- a/compose/ponsim_olt.yml
+++ b/compose/ponsim_olt.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/compose/ponsim_onu.yml b/compose/ponsim_onu.yml
index 216df56..7b6e30b 100644
--- a/compose/ponsim_onu.yml
+++ b/compose/ponsim_onu.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/compose/rw_core.yml b/compose/rw_core.yml
index 49d24f9..63ad263 100644
--- a/compose/rw_core.yml
+++ b/compose/rw_core.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/compose/rw_core_concurrency_test.yml b/compose/rw_core_concurrency_test.yml
index c672cfd..e43d160 100644
--- a/compose/rw_core_concurrency_test.yml
+++ b/compose/rw_core_concurrency_test.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2019 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/compose/system-test-bbsim.yml b/compose/system-test-bbsim.yml
index 850dadd..58e5761 100644
--- a/compose/system-test-bbsim.yml
+++ b/compose/system-test-bbsim.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -195,4 +196,4 @@
       - "18120:18120"
     networks:
       - default
-    restart: unless-stopped
\ No newline at end of file
+    restart: unless-stopped
diff --git a/compose/system-test-ponsim.yml b/compose/system-test-ponsim.yml
index af23668..b944102 100644
--- a/compose/system-test-ponsim.yml
+++ b/compose/system-test-ponsim.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/compose/system-test.yml b/compose/system-test.yml
index 4923889..0973718 100644
--- a/compose/system-test.yml
+++ b/compose/system-test.yml
@@ -1,4 +1,5 @@
 ---
+# Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/config.mk b/config.mk
index 1f441ea..58a1a66 100644
--- a/config.mk
+++ b/config.mk
@@ -32,6 +32,6 @@
 ##---------------------------------##
 ##---] Conditional make logic  [---##
 ##---------------------------------##
-# USE_DOCKER_MK    := true
+USE_DOCKER_MK    := true
 
 # [EOF]
diff --git a/db/model/proxy_test.go b/db/model/proxy_test.go
index 16c3632..a421f67 100644
--- a/db/model/proxy_test.go
+++ b/db/model/proxy_test.go
@@ -1,17 +1,17 @@
 /*
- * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
+* Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
 
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
 
- * http://www.apache.org/licenses/LICENSE-2.0
+* http://www.apache.org/licenses/LICENSE-2.0
 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
  */
 package model
 
diff --git a/docker/Dockerfile.rw_core b/docker/Dockerfile.rw_core
index 94c24ba..ebc79c3 100644
--- a/docker/Dockerfile.rw_core
+++ b/docker/Dockerfile.rw_core
@@ -47,7 +47,14 @@
 COPY db ./db
 COPY vendor ./vendor
 
-# go test -coverprofile fails w/o this.
+# -----------------------------------------------------------------------
+# Chicken-n-egg problem: See Dockerfile.rw_core
+#   required by testing (-coverprofile fails w/o this)
+#   test output, does not exist during build
+# -----------------------------------------------------------------------
+# [TODO] Create two distinct docker files to break the artificial
+#        dependency loop
+# -----------------------------------------------------------------------
 COPY tests/results/go-test-coverage.out ./tests/results/go-test-coverage.out
 
 # Copy files
diff --git a/makefiles/analysis/coverage.mk b/makefiles/analysis/coverage.mk
new file mode 100644
index 0000000..2505c65
--- /dev/null
+++ b/makefiles/analysis/coverage.mk
@@ -0,0 +1,117 @@
+# -*- makefile -*-
+# -----------------------------------------------------------------------
+# Copyright 2016-2023 Open Networking Foundation (ONF) and the ONF Contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# -----------------------------------------------------------------------
+
+$(if $(DEBUG),$(warning ENTER))
+
+# tests-dir      := ./tests/results
+# tests-coverage := $(tests-dir)/go-test-coverage
+# tests-results  := $(tests-dir)/go-test-results
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+test-coverage-init:
+	$(RM) -r tests/results
+	@mkdir -p ./tests/results
+
+        # Chicken-n-egg problem: See Dockerfile.rw_core
+        #   required by testing
+        #   does not exist during build
+	@touch ./tests/results/go-test-coverage.out
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+test-coverage: test-coverage-init
+
+	$(call banner-enter,$@)
+
+	$(RM) -r tests/results
+	@mkdir -p ./tests/results
+	@touch $(tests-coverage).out
+
+	@$(if $(LOCAL_FIX_PERMS),chmod 777 tests/results)
+
+	$(HIDE) $(MAKE) --no-print-directory test-go-coverage
+	$(HIDE) $(MAKE) --no-print-directory test-junit
+	$(HIDE) $(MAKE) --no-print-directory test-cobertura
+
+	@$(if $(LOCAL_FIX_PERMS),chmod 775 tests/results) # yes this may not run
+
+	$(call banner-leave,$@)
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+test-go-coverage:
+	$(call banner-enter,$@)
+	@$(if $(LOCAL_FIX_PERMS),chmod 777 tests/results)
+
+        # Cannot simply tee output else go exit status lost
+	(\
+  set -euo pipefail\
+    && ${GO} test -mod=vendor -v -coverprofile "./tests/results/go-test-coverage.out" -covermode count ./... 2>&1\
+) | tee ./tests/results/go-test-results.out
+
+	@$(if $(LOCAL_FIX_PERMS),chmod 775 tests/results)
+	$(call banner-leave,$@)
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+test-junit:
+	$(call banner-enter,$@)
+	@$(if $(LOCAL_FIX_PERMS),chmod 777 tests/results)
+
+	${GO_JUNIT_REPORT} \
+	    < ./tests/results/go-test-results.out \
+	    > ./tests/results/go-test-results.xml
+
+	@$(if $(LOCAL_FIX_PERMS),chmod 775 tests/results)
+	$(call banner-leave,$@)
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+test-cobertura:
+	$(call banner-enter,$@)
+	@$(if $(LOCAL_FIX_PERMS),chmod 777 tests/results)
+
+	${GOCOVER_COBERTURA} \
+	    < ./tests/results/go-test-coverage.out \
+	    > ./tests/results/go-test-coverage.xml
+
+	@$(if $(LOCAL_FIX_PERMS),chmod 775 tests/results)
+	$(call banner-leave,$@)
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+help ::
+	@echo '[TEST: coverage]'
+	@echo '  coverage               Generate test coverage reports'
+	@echo '  test-go-coverage       Generate a coverage report for vendor/'
+	@echo '  test-junit             Digest go coverage, generate junit'
+	@echo '  test-cobertura         Digest coverage and junit reports'
+
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+clean-coverage :
+	$(RM) -r ./tests/results
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+clean :: clean-coverage
+
+$(if $(DEBUG),$(warning LEAVE))
+
+# [EOF]
diff --git a/makefiles/analysis/include.mk b/makefiles/analysis/include.mk
new file mode 100644
index 0000000..f6e6a78
--- /dev/null
+++ b/makefiles/analysis/include.mk
@@ -0,0 +1,28 @@
+# -*- makefile -*-
+# -----------------------------------------------------------------------
+# Copyright 2016-2023 Open Networking Foundation (ONF) and the ONF Contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-FileCopyrightText: 2016-2023 Open Networking Foundation (ONF) and the ONF Contributors
+# SPDX-License-Identifier: Apache-2.0
+# -----------------------------------------------------------------------
+
+$(if $(DEBUG),$(warning ENTER))
+
+include $(MAKEDIR)/analysis/sca.mk
+include $(MAKEDIR)/analysis/coverage.mk
+
+$(if $(DEBUG),$(warning LEAVE))
+
+# [EOF]
diff --git a/makefiles/analysis/sca.mk b/makefiles/analysis/sca.mk
new file mode 100644
index 0000000..19d7f91
--- /dev/null
+++ b/makefiles/analysis/sca.mk
@@ -0,0 +1,52 @@
+# -*- makefile -*-
+# -----------------------------------------------------------------------
+# Copyright 2016-2023 Open Networking Foundation (ONF) and the ONF Contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# -----------------------------------------------------------------------
+
+$(if $(DEBUG),$(warning ENTER))
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+sca:
+	$(call banner-enter,$@)
+
+	@$(RM) -r ./sca-report
+	@mkdir -p ./sca-report
+	@echo "Running static code analysis..."
+	@${GOLANGCI_LINT} run --deadline=6m --out-format junit-xml ./... \
+	    | tee ./sca-report/sca-report.xml
+	@echo ""
+	@echo "Static code analysis OK"
+
+	$(call banner-leave,$@)
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+clean-sca :
+	@$(RM) -r ./sca-report
+	$(RM) ./sca-report/sca-report.xml
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+clean :: clean-sca
+
+## -----------------------------------------------------------------------
+## -----------------------------------------------------------------------
+help ::
+	@echo '  sca              Runs static code analysis with the golangci-lint tool'
+
+$(if $(DEBUG),$(warning LEAVE))
+
+# [EOF]
diff --git a/makefiles/docker/include.mk b/makefiles/docker/include.mk
index a56a85f..3326b27 100644
--- a/makefiles/docker/include.mk
+++ b/makefiles/docker/include.mk
@@ -19,14 +19,12 @@
 
 VOLTHA_TOOLS_VERSION ?= 2.4.0
 
-include $(MAKEDIR)/docker/versions.mk
-
 # ---------------------------
 # Macros: command refactoring
 # ---------------------------
 docker-iam     ?= --user $$(id -u):$$(id -g)#          # override for local use
 docker-run     = docker run --rm $(docker-iam)#        # Docker command stem
-docker-run-is  = $(docker-run) $(is-stdin)             # Attach streams when interactive
+docker-run-is  = $(docker-run) $(is-stdin)#            # Attach streams when interactive
 docker-run-app = $(docker-run-is) -v ${CURDIR}:/app#   # w/filesystem mount
 
 # -----------------------------------------------------------------------
@@ -67,6 +65,23 @@
 endif
 PROTOC_SH += $(vee-citools)-protoc sh -c
 
+# Usage: GO_JUNIT_REPORT := $(call get-docker-go-junit-repo)
+# get-docker-go-junit-repo = $(docker-run-app) $(vee-citools)-go-junit-report go-junit-report
+# GO_JUNIT_REPORT   ?= $(call get-docker-go-junit-repo)
+
+# Usage: GOCOVER_COBERTURA := $(call get-docker-gocover-cobertura)
+# get-docker-gocover-cobertura = $(docker-run-app)/src/github.com/opencord/voltha-openolt-adapter $(vee-citools)-gocover-cobertura gocover-cobertura
+# GOCOVER_COBERTURA ?= $(call get-docker-gocover-cobertura)
+
+GO_JUNIT_REPORT   = $(docker-run) -v ${CURDIR}:/app -i $(vee-citools)-go-junit-report go-junit-report
+GOCOVER_COBERTURA = $(docker-run) -v ${CURDIR}:/app/src/github.com/opencord/voltha-openolt-adapter -i $(vee-citools)-gocover-cobertura gocover-cobertura
+
+get-golangci-lint = $(docker-run-app) -v gocache:/.cache $(vee-golang) $(vee-citools)-golangci-lint golangci-lint
+GOLANGCI_LINT     ?= $(call get-golangci-lint)
+
+get-docker-hadolint = $(docker-run-app) $(vee-citools)-hadolint hadolint
+HADOLINT          ?= $(call get-docker-hadolint)
+
 $(if $(DEBUG),$(warning LEAVE))
 
 # [EOF]
diff --git a/makefiles/include.mk b/makefiles/include.mk
index 2e4af37..c82b882 100644
--- a/makefiles/include.mk
+++ b/makefiles/include.mk
@@ -32,6 +32,7 @@
 onf-mk-abs    ?= $(abspath $(lastword $(MAKEFILE_LIST)))
 onf-mk-top    := $(subst /include.mk,$(null),$(onf-mk-abs))
 ONF_MAKEDIR   := $(onf-mk-top)
+MAKEDIR	      := $(ONF_MAKEDIR)
 
 include $(ONF_MAKEDIR)/consts.mk
 include $(ONF_MAKEDIR)/help/include.mk       # render target help
@@ -49,7 +50,7 @@
 ##---------------------##
 ##---]  ON_DEMAND  [---##
 ##---------------------##
-$(if $(USE_DOCKER_MK),$(eval $(ONF_MAKEDIR)/docker/include.mk))
+$(if $(USE_DOCKER_MK),$(eval include $(ONF_MAKEDIR)/docker/include.mk))
 
 ##-------------------##
 ##---]  TARGETS  [---##
diff --git a/makefiles/lint/include.mk b/makefiles/lint/include.mk
index 95059fe..17efafa 100644
--- a/makefiles/lint/include.mk
+++ b/makefiles/lint/include.mk
@@ -1,6 +1,6 @@
 # -*- makefile -*-
 # -----------------------------------------------------------------------
-# Copyright 2022 Open Networking Foundation (ONF) and the ONF Contributors
+# Copyright 2012-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # -----------------------------------------------------------------------
 # https://gerrit.opencord.org/plugins/gitiles/onf-make
 # ONF.makefile.version = 1.1
diff --git a/makefiles/python/include.mk b/makefiles/python/include.mk
index 70b5ca1..f46aea0 100644
--- a/makefiles/python/include.mk
+++ b/makefiles/python/include.mk
@@ -1,6 +1,6 @@
 # -*- makefile -*-
 # -----------------------------------------------------------------------
-# Copyright 2022 Open Networking Foundation (ONF) and the ONF Contributors
+# Copyright 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# SPDX-FileCopyrightText: 2022 Open Networking Foundation (ONF) and the ONF Contributors
+# SPDX-FileCopyrightText: 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # SPDX-License-Identifier: Apache-2.0
 # -----------------------------------------------------------------------
 
diff --git a/makefiles/python/test/include.mk b/makefiles/python/test/include.mk
index 12afc3e..026fe68 100644
--- a/makefiles/python/test/include.mk
+++ b/makefiles/python/test/include.mk
@@ -1,6 +1,6 @@
 # -*- makefile -*-
 # -----------------------------------------------------------------------
-# Copyright 2022 Open Networking Foundation (ONF) and the ONF Contributors
+# Copyright 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# SPDX-FileCopyrightText: 2022 Open Networking Foundation (ONF) and the ONF Contributors
+# SPDX-FileCopyrightText: 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # SPDX-License-Identifier: Apache-2.0
 # -----------------------------------------------------------------------
 
diff --git a/makefiles/targets/check.mk b/makefiles/targets/check.mk
index 2145343..aa1b0d0 100644
--- a/makefiles/targets/check.mk
+++ b/makefiles/targets/check.mk
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# SPDX-FileCopyrightText: 2022 Open Networking Foundation (ONF) and the ONF Contributors
+# SPDX-FileCopyrightText: 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # SPDX-License-Identifier: Apache-2.0
 # -----------------------------------------------------------------------
 
diff --git a/makefiles/targets/clean.mk b/makefiles/targets/clean.mk
index f504dec..f787e5c 100644
--- a/makefiles/targets/clean.mk
+++ b/makefiles/targets/clean.mk
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# SPDX-FileCopyrightText: 2022 Open Networking Foundation (ONF) and the ONF Contributors
+# SPDX-FileCopyrightText: 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # SPDX-License-Identifier: Apache-2.0
 # -----------------------------------------------------------------------
 
diff --git a/makefiles/targets/sterile.mk b/makefiles/targets/sterile.mk
index 1eb7035..c661f53 100644
--- a/makefiles/targets/sterile.mk
+++ b/makefiles/targets/sterile.mk
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# SPDX-FileCopyrightText: 2022 Open Networking Foundation (ONF) and the ONF Contributors
+# SPDX-FileCopyrightText: 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # SPDX-License-Identifier: Apache-2.0
 # -----------------------------------------------------------------------
 
diff --git a/makefiles/targets/test.mk b/makefiles/targets/test.mk
index d70fa4a..2f68086 100644
--- a/makefiles/targets/test.mk
+++ b/makefiles/targets/test.mk
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# SPDX-FileCopyrightText: 2022 Open Networking Foundation (ONF) and the ONF Contributors
+# SPDX-FileCopyrightText: 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # SPDX-License-Identifier: Apache-2.0
 # -----------------------------------------------------------------------
 
diff --git a/makefiles/targets/tox.mk b/makefiles/targets/tox.mk
index b3a638b..44da921 100644
--- a/makefiles/targets/tox.mk
+++ b/makefiles/targets/tox.mk
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# SPDX-FileCopyrightText: 2022 Open Networking Foundation (ONF) and the ONF Contributors
+# SPDX-FileCopyrightText: 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # SPDX-License-Identifier: Apache-2.0
 # -----------------------------------------------------------------------
 
diff --git a/makefiles/utils/include.mk b/makefiles/utils/include.mk
index 84f684b..5a7678f 100644
--- a/makefiles/utils/include.mk
+++ b/makefiles/utils/include.mk
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# SPDX-FileCopyrightText: 2022 Open Networking Foundation (ONF) and the ONF Contributors
+# SPDX-FileCopyrightText: 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
 # SPDX-License-Identifier: Apache-2.0
 # -----------------------------------------------------------------------
 
diff --git a/rw_core/core/adapter/endpoint_manager_test.go b/rw_core/core/adapter/endpoint_manager_test.go
index 63ed57e..0edb4d2 100644
--- a/rw_core/core/adapter/endpoint_manager_test.go
+++ b/rw_core/core/adapter/endpoint_manager_test.go
@@ -1,17 +1,17 @@
 /*
- * Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
+* Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
 
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
 
- * http://www.apache.org/licenses/LICENSE-2.0
+* http://www.apache.org/licenses/LICENSE-2.0
 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
  */
 package adapter
 
diff --git a/rw_core/core/adapter/manager.go b/rw_core/core/adapter/manager.go
index d73da79..3dfd8a6 100644
--- a/rw_core/core/adapter/manager.go
+++ b/rw_core/core/adapter/manager.go
@@ -275,7 +275,7 @@
 	return nil
 }
 
-//loadAdaptersAndDevicetypesInMemory loads the existing set of adapters and device types in memory
+// loadAdaptersAndDevicetypesInMemory loads the existing set of adapters and device types in memory
 func (aMgr *Manager) loadAdaptersAndDevicetypesInMemory(ctx context.Context) error {
 	// Load the adapters
 	var adapters []*voltha.Adapter
diff --git a/rw_core/core/device/agent.go b/rw_core/core/device/agent.go
index d6430af..bc3eaed 100755
--- a/rw_core/core/device/agent.go
+++ b/rw_core/core/device/agent.go
@@ -85,7 +85,7 @@
 	transientStateLoader *transientstate.Loader
 }
 
-//newAgent creates a new device agent. The device will be initialized when start() is called.
+// newAgent creates a new device agent. The device will be initialized when start() is called.
 func newAgent(device *voltha.Device, deviceMgr *Manager, dbPath *model.Path, deviceProxy *model.Proxy, internalTimeout, rpcTimeout, flowTimeout time.Duration) *Agent {
 	deviceID := device.Id
 	if deviceID == "" {
@@ -319,7 +319,8 @@
 }
 
 // onDeleteFailure is a common callback for scenarios where we receive an error response following a delete request
-//  to an adapter and the only action required is to return the error response.
+//
+//	to an adapter and the only action required is to return the error response.
 func (agent *Agent) onDeleteFailure(ctx context.Context, err error, prevState, currState *common.AdminState_Types) {
 	logger.Errorw(ctx, "rpc-failed", log.Fields{"rpc": coreutils.GetRPCMetadataFromContext(ctx), "device-id": agent.deviceID, "error": err})
 
@@ -483,8 +484,8 @@
 	return nil
 }
 
-//addFlowsAndGroups adds the "newFlows" and "newGroups" from the existing flows/groups and sends the update to the
-//adapters
+// addFlowsAndGroups adds the "newFlows" and "newGroups" from the existing flows/groups and sends the update to the
+// adapters
 func (agent *Agent) addFlowsAndGroups(ctx context.Context, newFlows []*ofp.OfpFlowStats, newGroups []*ofp.OfpGroupEntry, flowMetadata *ofp.FlowMetadata) error {
 	var flwResponse, grpResponse coreutils.Response
 	var err error
@@ -503,8 +504,8 @@
 	return nil
 }
 
-//deleteFlowsAndGroups removes the "flowsToDel" and "groupsToDel" from the existing flows/groups and sends the update to the
-//adapters
+// deleteFlowsAndGroups removes the "flowsToDel" and "groupsToDel" from the existing flows/groups and sends the update to the
+// adapters
 func (agent *Agent) deleteFlowsAndGroups(ctx context.Context, flowsToDel []*ofp.OfpFlowStats, groupsToDel []*ofp.OfpGroupEntry, flowMetadata *ofp.FlowMetadata) error {
 	var flwResponse, grpResponse coreutils.Response
 	var err error
@@ -521,8 +522,8 @@
 	return nil
 }
 
-//updateFlowsAndGroups replaces the existing flows and groups with "updatedFlows" and "updatedGroups" respectively. It
-//also sends the updates to the adapters
+// updateFlowsAndGroups replaces the existing flows and groups with "updatedFlows" and "updatedGroups" respectively. It
+// also sends the updates to the adapters
 func (agent *Agent) updateFlowsAndGroups(ctx context.Context, updatedFlows []*ofp.OfpFlowStats, updatedGroups []*ofp.OfpGroupEntry, flowMetadata *ofp.FlowMetadata) error {
 	var flwResponse, grpResponse coreutils.Response
 	var err error
@@ -539,7 +540,7 @@
 	return nil
 }
 
-//disableDevice disable a device
+// disableDevice disable a device
 func (agent *Agent) disableDevice(ctx context.Context) error {
 	var err error
 	var desc string
diff --git a/rw_core/core/device/agent_flow.go b/rw_core/core/device/agent_flow.go
index 85c92f1..21fdef0 100644
--- a/rw_core/core/device/agent_flow.go
+++ b/rw_core/core/device/agent_flow.go
@@ -377,7 +377,7 @@
 	return response, nil
 }
 
-//filterOutFlows removes flows from a device using the uni-port as filter
+// filterOutFlows removes flows from a device using the uni-port as filter
 func (agent *Agent) filterOutFlows(ctx context.Context, uniPort uint32, flowMetadata *ofp.FlowMetadata) error {
 	var flowsToDelete []*ofp.OfpFlowStats
 	// If an existing flow has the uniPort as an InPort or OutPort or as a Tunnel ID then it needs to be removed
@@ -406,7 +406,7 @@
 	return nil
 }
 
-//deleteAllFlows deletes all flows in the device table
+// deleteAllFlows deletes all flows in the device table
 func (agent *Agent) deleteAllFlows(ctx context.Context) error {
 	logger.Debugw(ctx, "deleteAllFlows", log.Fields{"device-id": agent.deviceID})
 
diff --git a/rw_core/core/device/flow/cache.go b/rw_core/core/device/flow/cache.go
index c600c21..25f0244 100644
--- a/rw_core/core/device/flow/cache.go
+++ b/rw_core/core/device/flow/cache.go
@@ -135,7 +135,8 @@
 
 // ListIDs returns a snapshot of all the managed flow IDs
 // TODO: iterating through flows safely is expensive now, since all flows are stored & locked separately
-//       should avoid this where possible
+//
+//	should avoid this where possible
 func (cache *Cache) ListIDs() map[uint64]struct{} {
 	cache.lock.RLock()
 	defer cache.lock.RUnlock()
diff --git a/rw_core/core/device/group/cache.go b/rw_core/core/device/group/cache.go
index 49686ca..eb568cf 100644
--- a/rw_core/core/device/group/cache.go
+++ b/rw_core/core/device/group/cache.go
@@ -135,7 +135,8 @@
 
 // ListIDs returns a snapshot of all the managed group IDs
 // TODO: iterating through groups safely is expensive now, since all groups are stored & locked separately
-//       should avoid this where possible
+//
+//	should avoid this where possible
 func (cache *Cache) ListIDs() map[uint32]struct{} {
 	cache.lock.RLock()
 	defer cache.lock.RUnlock()
diff --git a/rw_core/core/device/logical_agent_flow.go b/rw_core/core/device/logical_agent_flow.go
index 4fa0090..7ec3b43 100644
--- a/rw_core/core/device/logical_agent_flow.go
+++ b/rw_core/core/device/logical_agent_flow.go
@@ -48,7 +48,7 @@
 	return flows
 }
 
-//updateFlowTable updates the flow table of that logical device
+// updateFlowTable updates the flow table of that logical device
 func (agent *LogicalAgent) updateFlowTable(ctx context.Context, flow *ofp.FlowTableUpdate) error {
 	logger.Debug(ctx, "update-flow-table")
 	if flow == nil {
@@ -71,7 +71,7 @@
 		"unhandled-command: lDeviceId:%s, command:%s", agent.logicalDeviceID, flow.FlowMod.GetCommand())
 }
 
-//flowAdd adds a flow to the flow table of that logical device
+// flowAdd adds a flow to the flow table of that logical device
 func (agent *LogicalAgent) flowAdd(ctx context.Context, flowUpdate *ofp.FlowTableUpdate) error {
 	mod := flowUpdate.FlowMod
 	logger.Debugw(ctx, "flow-add", log.Fields{"flow": mod})
@@ -271,7 +271,7 @@
 	return nil
 }
 
-//flowDelete deletes a flow from the flow table of that logical device
+// flowDelete deletes a flow from the flow table of that logical device
 func (agent *LogicalAgent) flowDelete(ctx context.Context, flowUpdate *ofp.FlowTableUpdate) error {
 	logger.Debug(ctx, "flow-delete")
 	mod := flowUpdate.FlowMod
@@ -406,7 +406,7 @@
 	return nil
 }
 
-//flowDeleteStrict deletes a flow from the flow table of that logical device
+// flowDeleteStrict deletes a flow from the flow table of that logical device
 func (agent *LogicalAgent) flowDeleteStrict(ctx context.Context, flowUpdate *ofp.FlowTableUpdate) error {
 	var flowHandle *flow.Handle
 	var have bool
@@ -528,12 +528,12 @@
 	return nil
 }
 
-//flowModify modifies a flow from the flow table of that logical device
+// flowModify modifies a flow from the flow table of that logical device
 func (agent *LogicalAgent) flowModify(flowUpdate *ofp.FlowTableUpdate) error {
 	return errors.New("flowModify not implemented")
 }
 
-//flowModifyStrict deletes a flow from the flow table of that logical device
+// flowModifyStrict deletes a flow from the flow table of that logical device
 func (agent *LogicalAgent) flowModifyStrict(flowUpdate *ofp.FlowTableUpdate) error {
 	return errors.New("flowModifyStrict not implemented")
 }
diff --git a/rw_core/core/device/logical_agent_group.go b/rw_core/core/device/logical_agent_group.go
index 261b0a5..2c855de 100644
--- a/rw_core/core/device/logical_agent_group.go
+++ b/rw_core/core/device/logical_agent_group.go
@@ -43,7 +43,7 @@
 	return groups
 }
 
-//updateGroupTable updates the group table of that logical device
+// updateGroupTable updates the group table of that logical device
 func (agent *LogicalAgent) updateGroupTable(ctx context.Context, groupMod *ofp.OfpGroupMod) error {
 	logger.Debug(ctx, "update-group-table")
 	if groupMod == nil {
diff --git a/rw_core/core/device/logical_agent_port.go b/rw_core/core/device/logical_agent_port.go
index 54fa620..e531bd5 100644
--- a/rw_core/core/device/logical_agent_port.go
+++ b/rw_core/core/device/logical_agent_port.go
@@ -424,8 +424,10 @@
 }
 
 // TODO: shouldn't need to guarantee event ordering like this
-//       event ordering should really be protected by per-LogicalPort lock
-//       once routing uses on-demand calculation only, this should be changed
+//
+//	event ordering should really be protected by per-LogicalPort lock
+//	once routing uses on-demand calculation only, this should be changed
+//
 // assignQueuePosition ensures that no events will be sent until this thread calls send() on the returned queuePosition
 func (e *orderedEvents) assignQueuePosition() queuePosition {
 	e.mutex.Lock()
@@ -498,7 +500,7 @@
 	return 0, status.Error(codes.NotFound, "No NNI port found")
 }
 
-//GetNNIPorts returns all NNI ports
+// GetNNIPorts returns all NNI ports
 func (agent *LogicalAgent) GetNNIPorts() map[uint32]struct{} {
 	return agent.portLoader.ListIDsForDevice(agent.rootDeviceID)
 }
diff --git a/rw_core/core/device/logical_agent_route.go b/rw_core/core/device/logical_agent_route.go
index 88cf105..bd6c5c1 100644
--- a/rw_core/core/device/logical_agent_route.go
+++ b/rw_core/core/device/logical_agent_route.go
@@ -90,7 +90,7 @@
 	return agent.deviceRoutes
 }
 
-//rebuildRoutes rebuilds the device routes
+// rebuildRoutes rebuilds the device routes
 func (agent *LogicalAgent) buildRoutes(ctx context.Context) error {
 	logger.Debugf(ctx, "building-routes", log.Fields{"logical-device-id": agent.logicalDeviceID})
 	if err := agent.requestQueue.WaitForGreenLight(ctx); err != nil {
@@ -118,7 +118,7 @@
 	return nil
 }
 
-//updateRoutes updates the device routes
+// updateRoutes updates the device routes
 func (agent *LogicalAgent) updateRoutes(ctx context.Context, deviceID string, devicePorts map[uint32]*voltha.Port, lp *voltha.LogicalPort, lps map[uint32]*voltha.LogicalPort) error {
 	logger.Debugw(ctx, "updateRoutes", log.Fields{"logical-device-id": agent.logicalDeviceID, "device-id": deviceID, "port:": lp})
 
@@ -131,7 +131,7 @@
 	return nil
 }
 
-//updateAllRoutes updates the device routes using all the logical ports on that device
+// updateAllRoutes updates the device routes using all the logical ports on that device
 func (agent *LogicalAgent) updateAllRoutes(ctx context.Context, deviceID string, devicePorts map[uint32]*voltha.Port) error {
 	logger.Debugw(ctx, "updateAllRoutes", log.Fields{"logical-device-id": agent.logicalDeviceID, "device-id": deviceID, "ports-count": len(devicePorts)})
 
diff --git a/rw_core/core/device/logical_manager.go b/rw_core/core/device/logical_manager.go
index 8fe04a9..1e3e386 100644
--- a/rw_core/core/device/logical_manager.go
+++ b/rw_core/core/device/logical_manager.go
@@ -119,7 +119,7 @@
 	return nil, status.Errorf(codes.NotFound, "%s", id)
 }
 
-//ListLogicalDevices returns the list of all logical devices
+// ListLogicalDevices returns the list of all logical devices
 func (ldMgr *LogicalManager) ListLogicalDevices(ctx context.Context, _ *empty.Empty) (*voltha.LogicalDevices, error) {
 	ctx = utils.WithRPCMetadataContext(ctx, "ListLogicalDevices")
 	logger.Debug(ctx, "list-all-logical-devices")
@@ -202,7 +202,7 @@
 	return ldID
 }
 
-//getLogicalDeviceFromModel retrieves the logical device data from the model.
+// getLogicalDeviceFromModel retrieves the logical device data from the model.
 func (ldMgr *LogicalManager) getLogicalDeviceFromModel(ctx context.Context, lDeviceID string) (*voltha.LogicalDevice, error) {
 	logicalDevice := &voltha.LogicalDevice{}
 	if have, err := ldMgr.ldProxy.Get(ctx, lDeviceID, logicalDevice); err != nil {
diff --git a/rw_core/core/device/logical_port/loader.go b/rw_core/core/device/logical_port/loader.go
index 61935aa..90bd172 100644
--- a/rw_core/core/device/logical_port/loader.go
+++ b/rw_core/core/device/logical_port/loader.go
@@ -180,7 +180,8 @@
 
 // ListIDs returns a snapshot of all the managed port IDs
 // TODO: iterating through ports safely is expensive now, since all ports are stored & locked separately
-//       should avoid this where possible
+//
+//	should avoid this where possible
 func (loader *Loader) ListIDs() map[uint32]struct{} {
 	loader.lock.RLock()
 	defer loader.lock.RUnlock()
diff --git a/rw_core/core/device/manager.go b/rw_core/core/device/manager.go
index e8ffacf..3dd5ea9 100755
--- a/rw_core/core/device/manager.go
+++ b/rw_core/core/device/manager.go
@@ -61,7 +61,7 @@
 	doneCh                  chan struct{}
 }
 
-//NewManagers creates the Manager and the Logical Manager.
+// NewManagers creates the Manager and the Logical Manager.
 func NewManagers(dbPath *model.Path, adapterMgr *adapter.Manager, cf *config.RWCoreFlags, coreInstanceID string, eventProxy *events.EventProxy) (*Manager, *LogicalManager) {
 	deviceMgr := &Manager{
 		rootDevices:             make(map[string]bool),
@@ -228,7 +228,7 @@
 	return exist
 }
 
-//isParentDeviceExist checks whether device is already preprovisioned.
+// isParentDeviceExist checks whether device is already preprovisioned.
 func (dMgr *Manager) isParentDeviceExist(ctx context.Context, newDevice *voltha.Device) (bool, error) {
 	hostPort := newDevice.GetHostAndPort()
 	var devices []*voltha.Device
@@ -251,7 +251,7 @@
 	return false, nil
 }
 
-//getDeviceFromModelretrieves the device data from the model.
+// getDeviceFromModelretrieves the device data from the model.
 func (dMgr *Manager) getDeviceFromModel(ctx context.Context, deviceID string) (*voltha.Device, error) {
 	device := &voltha.Device{}
 	if have, err := dMgr.dProxy.Get(ctx, deviceID, device); err != nil {
@@ -609,7 +609,7 @@
 	return status.Errorf(codes.NotFound, "%s", deviceID)
 }
 
-//UpdatePortsState updates all ports on the device
+// UpdatePortsState updates all ports on the device
 func (dMgr *Manager) UpdatePortsState(ctx context.Context, deviceID string, portTypeFilter uint32, state voltha.OperStatus_Types) error {
 	logger.Debugw(ctx, "update-ports-state", log.Fields{"device-id": deviceID})
 	agent := dMgr.getDeviceAgent(ctx, deviceID)
@@ -678,7 +678,7 @@
 therefore use the data as is without trying to get the latest from the model.
 */
 
-//DisableAllChildDevices is invoked as a callback when the parent device is disabled
+// DisableAllChildDevices is invoked as a callback when the parent device is disabled
 func (dMgr *Manager) DisableAllChildDevices(ctx context.Context, parentCurrDevice *voltha.Device) error {
 	logger.Debug(ctx, "disable-all-child-devices")
 	ports, _ := dMgr.listDevicePorts(ctx, parentCurrDevice.Id)
@@ -693,7 +693,7 @@
 	return nil
 }
 
-//getAllChildDeviceIds is a helper method to get all the child device IDs from the device passed as parameter
+// getAllChildDeviceIds is a helper method to get all the child device IDs from the device passed as parameter
 func (dMgr *Manager) getAllChildDeviceIds(ctx context.Context, parentDevicePorts map[uint32]*voltha.Port) map[string]struct{} {
 	logger.Debug(ctx, "get-all-child-device-ids")
 	childDeviceIds := make(map[string]struct{}, len(parentDevicePorts))
@@ -706,7 +706,7 @@
 	return childDeviceIds
 }
 
-//GgtAllChildDevices is a helper method to get all the child device IDs from the device passed as parameter
+// GgtAllChildDevices is a helper method to get all the child device IDs from the device passed as parameter
 func (dMgr *Manager) getAllChildDevices(ctx context.Context, parentDeviceID string) (*voltha.Devices, error) {
 	logger.Debugw(ctx, "get-all-child-devices", log.Fields{"parent-device-id": parentDeviceID})
 	if parentDevicePorts, err := dMgr.listDevicePorts(ctx, parentDeviceID); err == nil {
diff --git a/rw_core/core/device/manager_nbi.go b/rw_core/core/device/manager_nbi.go
index b978823..f5146b1 100644
--- a/rw_core/core/device/manager_nbi.go
+++ b/rw_core/core/device/manager_nbi.go
@@ -1,17 +1,17 @@
 /*
- * Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
+* Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
 
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
 
- * http://www.apache.org/licenses/LICENSE-2.0
+* http://www.apache.org/licenses/LICENSE-2.0
 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
  */
 package device
 
@@ -92,7 +92,7 @@
 	return &empty.Empty{}, agent.disableDevice(ctx)
 }
 
-//RebootDevice invoked the reboot API to the corresponding adapter
+// RebootDevice invoked the reboot API to the corresponding adapter
 func (dMgr *Manager) RebootDevice(ctx context.Context, id *voltha.ID) (*empty.Empty, error) {
 	ctx = utils.WithRPCMetadataContext(ctx, "RebootDevice")
 	log.EnrichSpan(ctx, log.Fields{"device-id": id.Id})
diff --git a/rw_core/core/device/manager_sbi.go b/rw_core/core/device/manager_sbi.go
index 0a5eff8..c8269bc 100644
--- a/rw_core/core/device/manager_sbi.go
+++ b/rw_core/core/device/manager_sbi.go
@@ -1,17 +1,17 @@
 /*
- * Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
+* Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
 
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
 
- * http://www.apache.org/licenses/LICENSE-2.0
+* http://www.apache.org/licenses/LICENSE-2.0
 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
  */
 package device
 
@@ -251,8 +251,8 @@
 	return &empty.Empty{}, nil
 }
 
-//ChildDevicesLost is invoked by an adapter to indicate that a parent device is in a state (Disabled) where it
-//cannot manage the child devices.  This will trigger the Core to disable all the child devices.
+// ChildDevicesLost is invoked by an adapter to indicate that a parent device is in a state (Disabled) where it
+// cannot manage the child devices.  This will trigger the Core to disable all the child devices.
 func (dMgr *Manager) ChildDevicesLost(ctx context.Context, parentID *common.ID) (*empty.Empty, error) {
 	ctx = utils.WithNewSpanAndRPCMetadataContext(ctx, "ChildDevicesLost")
 	logger.Debugw(ctx, "child-devices-lost", log.Fields{"parent-id": parentID.Id})
@@ -268,7 +268,7 @@
 	return &empty.Empty{}, nil
 }
 
-//ChildDevicesDetected is invoked by an adapter when child devices are found, typically after after a
+// ChildDevicesDetected is invoked by an adapter when child devices are found, typically after a
 // disable/enable sequence.  This will trigger the Core to Enable all the child devices of that parent.
 func (dMgr *Manager) ChildDevicesDetected(ctx context.Context, parentDeviceID *common.ID) (*empty.Empty, error) {
 	ctx = utils.WithNewSpanAndRPCMetadataContext(ctx, "ChildDevicesDetected")
diff --git a/rw_core/core/device/manager_state_callback.go b/rw_core/core/device/manager_state_callback.go
index 4dc5b7e..9fa7c53 100644
--- a/rw_core/core/device/manager_state_callback.go
+++ b/rw_core/core/device/manager_state_callback.go
@@ -1,17 +1,17 @@
 /*
- * Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
+* Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
 
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
 
- * http://www.apache.org/licenses/LICENSE-2.0
+* http://www.apache.org/licenses/LICENSE-2.0
 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
  */
 package device
 
@@ -26,8 +26,7 @@
 	"google.golang.org/grpc/status"
 )
 
-//
-//CreateLogicalDevice creates logical device in core
+// CreateLogicalDevice creates logical device in core
 func (dMgr *Manager) CreateLogicalDevice(ctx context.Context, cDevice *voltha.Device) error {
 	logger.Info(ctx, "create-logical-device")
 	// Verify whether the logical device has already been created
@@ -103,7 +102,7 @@
 	return nil
 }
 
-//DeleteAllLogicalPorts is invoked as a callback when the parent device's connection status moves to UNREACHABLE
+// DeleteAllLogicalPorts is invoked as a callback when the parent device's connection status moves to UNREACHABLE
 func (dMgr *Manager) DeleteAllLogicalPorts(ctx context.Context, parentDevice *voltha.Device) error {
 	logger.Debugw(ctx, "delete-all-logical-ports", log.Fields{"parent-device-id": parentDevice.Id})
 	if err := dMgr.logicalDeviceMgr.deleteAllLogicalPorts(ctx, parentDevice); err != nil {
@@ -150,7 +149,7 @@
 	return nil
 }
 
-//DeleteAllDeviceFlows is invoked as a callback when the parent device's connection status moves to UNREACHABLE
+// DeleteAllDeviceFlows is invoked as a callback when the parent device's connection status moves to UNREACHABLE
 func (dMgr *Manager) DeleteAllDeviceFlows(ctx context.Context, parentDevice *voltha.Device) error {
 	logger.Debugw(ctx, "delete-all-device-flows", log.Fields{"parent-device-id": parentDevice.Id})
 	if agent := dMgr.getDeviceAgent(ctx, parentDevice.Id); agent != nil {
diff --git a/rw_core/core/device/meter/loader.go b/rw_core/core/device/meter/loader.go
index 941a23c..ffc8c48 100644
--- a/rw_core/core/device/meter/loader.go
+++ b/rw_core/core/device/meter/loader.go
@@ -160,7 +160,8 @@
 
 // ListIDs returns a snapshot of all the managed meter IDs
 // TODO: iterating through meters safely is expensive now, since all meters are stored & locked separately
-//       should avoid this where possible
+//
+//	should avoid this where possible
 func (loader *Loader) ListIDs() map[uint32]struct{} {
 	loader.lock.RLock()
 	defer loader.lock.RUnlock()
diff --git a/rw_core/core/device/port/loader.go b/rw_core/core/device/port/loader.go
index 2f322ef..c6b31e9 100644
--- a/rw_core/core/device/port/loader.go
+++ b/rw_core/core/device/port/loader.go
@@ -174,7 +174,8 @@
 
 // ListIDs returns a snapshot of all the managed port IDs
 // TODO: iterating through ports safely is expensive now, since all ports are stored & locked separately
-//       should avoid this where possible
+//
+//	should avoid this where possible
 func (loader *Loader) ListIDs() map[uint32]struct{} {
 	loader.lock.RLock()
 	defer loader.lock.RUnlock()
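Note on the two loader.go hunks above: the comment rewrites are gofmt output from Go 1.19 or newer, which formats doc comments. It inserts a space after the // marker and turns an indented continuation line into a blank // line followed by a tab-indented line, so go doc renders the continuation as preformatted text. A generic illustration of the resulting style (not code from this change):

package example

// ListThings returns a snapshot of all the managed IDs.
// TODO: iterating safely is expensive now, since items are stored & locked separately
//
//	should avoid this where possible
//
// (Before Go 1.19 the continuation above was typically written as
// "//       should avoid ..."; gofmt now rewrites it to the blank "//" line
// plus a tab-indented line, as seen in the hunks above.)
func ListThings() map[uint32]struct{} { return nil }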
diff --git a/rw_core/flowdecomposition/flow_decomposer.go b/rw_core/flowdecomposition/flow_decomposer.go
index 905d649..19c5062 100644
--- a/rw_core/flowdecomposition/flow_decomposer.go
+++ b/rw_core/flowdecomposition/flow_decomposer.go
@@ -50,7 +50,7 @@
 	return &FlowDecomposer{getDevice: getDevice}
 }
 
-//DecomposeRules decomposes per-device flows and flow-groups from the flows and groups defined on a logical device
+// DecomposeRules decomposes per-device flows and flow-groups from the flows and groups defined on a logical device
 func (fd *FlowDecomposer) DecomposeRules(ctx context.Context, agent LogicalDeviceAgent, flows map[uint64]*ofp.OfpFlowStats, groups map[uint32]*ofp.OfpGroupEntry) (*fu.DeviceRules, error) {
 	deviceRules := *fu.NewDeviceRules()
 	devicesToUpdate := make(map[string]string)
@@ -110,7 +110,7 @@
 	return newDeviceRules, nil
 }
 
-//processControllerBoundFlow decomposes trap flows
+// processControllerBoundFlow decomposes trap flows
 func (fd *FlowDecomposer) processControllerBoundFlow(ctx context.Context, agent LogicalDeviceAgent, path []route.Hop,
 	inPortNo uint32, outPortNo uint32, flow *ofp.OfpFlowStats) (*fu.DeviceRules, error) {
 
diff --git a/rw_core/mocks/adapter.go b/rw_core/mocks/adapter.go
index 7b8cbf2..73d7a16 100644
--- a/rw_core/mocks/adapter.go
+++ b/rw_core/mocks/adapter.go
@@ -278,7 +278,7 @@
 	return &empty.Empty{}, nil
 }
 
-//Packets
+// Packets
 func (ta *Adapter) SendPacketOut(ctx context.Context, packet *ca.PacketOut) (*empty.Empty, error) {
 	return &empty.Empty{}, nil
 }
diff --git a/rw_core/profile.go b/rw_core/profile.go
index 8a2fefd..91acf3d 100644
--- a/rw_core/profile.go
+++ b/rw_core/profile.go
@@ -1,3 +1,4 @@
+//go:build profile
 // +build profile
 
 /*
diff --git a/rw_core/release.go b/rw_core/release.go
index 29032c0..2354117 100644
--- a/rw_core/release.go
+++ b/rw_core/release.go
@@ -1,3 +1,4 @@
+//go:build !profile
 // +build !profile
 
 /*
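The profile.go and release.go hunks add the //go:build form of the build constraint next to the legacy // +build line. gofmt from Go 1.17 onward emits the //go:build line automatically and keeps the two in sync; both lines must select the same build configurations, and the legacy line can only be dropped once every builder runs Go 1.17 or newer. A generic sketch of a file guarded this way (illustrative only, not code from this repo):

//go:build !profile
// +build !profile

// This file is compiled by default and excluded when building with -tags profile.
package main

import "fmt"

func main() {
	fmt.Println("release build: the 'profile' tag is not set")
}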
diff --git a/rw_core/route/device_route.go b/rw_core/route/device_route.go
index 4e7efb8..4d02a54 100644
--- a/rw_core/route/device_route.go
+++ b/rw_core/route/device_route.go
@@ -79,7 +79,7 @@
 	}
 }
 
-//IsRootPort returns true if the port is a root port on a logical device
+// IsRootPort returns true if the port is a root port on a logical device
 func (dr *DeviceRoutes) IsRootPort(port uint32) bool {
 	dr.rootPortsLock.RLock()
 	defer dr.rootPortsLock.RUnlock()
@@ -124,7 +124,7 @@
 	dr.reset()
 }
 
-//ComputeRoutes calculates all the routes between the logical ports.  This will clear up any existing route
+// ComputeRoutes calculates all the routes between the logical ports.  This will clear up any existing route
 func (dr *DeviceRoutes) ComputeRoutes(ctx context.Context, lps map[uint32]*voltha.LogicalPort) error {
 	dr.routeBuildLock.Lock()
 	defer dr.routeBuildLock.Unlock()
@@ -414,7 +414,7 @@
 	return routes, fmt.Errorf("no half route found for ingress port %d, egress port %d and nni as egress %t", ingress, egress, nniAsEgress)
 }
 
-//getDeviceWithCacheUpdate returns the from the model and updates the PON ports map of that device.
+// getDeviceWithCacheUpdate returns the device ports from the model and updates the PON ports map of that device.
 func (dr *DeviceRoutes) getDeviceWithCacheUpdate(ctx context.Context, deviceID string) (map[uint32]*voltha.Port, error) {
 	devicePorts, err := dr.listDevicePorts(ctx, deviceID)
 	if err != nil {
@@ -425,7 +425,7 @@
 	return devicePorts, nil
 }
 
-//copyFromExistingNNIRoutes copies routes from an existing set of NNI routes
+// copyFromExistingNNIRoutes copies routes from an existing set of NNI routes
 func (dr *DeviceRoutes) copyFromExistingNNIRoutes(newNNIPort *voltha.LogicalPort, copyFromNNIPort *voltha.LogicalPort) {
 	updatedRoutes := make(map[PathID][]Hop)
 	for key, val := range dr.Routes {
@@ -457,12 +457,12 @@
 	dr.childConnectionPort = make(map[string]uint32)
 }
 
-//concatDeviceIdPortId formats a portid using the device id and the port number
+// concatDeviceIDPortID formats a port ID using the device ID and the port number
 func concatDeviceIDPortID(deviceID string, portNo uint32) string {
 	return fmt.Sprintf("%s:%d", deviceID, portNo)
 }
 
-//getReverseRoute returns the reverse of the route
+// getReverseRoute returns the reverse of the route
 func getReverseRoute(route []Hop) []Hop {
 	reverse := make([]Hop, len(route))
 	for i, j := 0, len(route)-1; j >= 0; i, j = i+1, j-1 {
diff --git a/rw_core/route/device_route_test.go b/rw_core/route/device_route_test.go
index f641678..4fbda0e 100644
--- a/rw_core/route/device_route_test.go
+++ b/rw_core/route/device_route_test.go
@@ -40,13 +40,13 @@
 
 type contextKey string
 
-//portRegistration is a message sent from an OLT device to a logical device to create a logical port
+// portRegistration is a message sent from an OLT device to a logical device to create a logical port
 type portRegistration struct {
 	port     *voltha.Port
 	rootPort bool
 }
 
-//onuRegistration is a message sent from an ONU device to an OLT device to register an ONU
+// onuRegistration is a message sent from an ONU device to an OLT device to register an ONU
 type onuRegistration struct {
 	onuID    string
 	onuPorts map[uint32]*voltha.Port
diff --git a/rw_core/test/core_nbi_handler_multi_test.go b/rw_core/test/core_nbi_handler_multi_test.go
index e6c4213..dc154c0 100755
--- a/rw_core/test/core_nbi_handler_multi_test.go
+++ b/rw_core/test/core_nbi_handler_multi_test.go
@@ -2117,7 +2117,7 @@
 	nb.testAdapterRegistration(t, nbi)
 }
 
-//TestLogDeviceUpdate is used to extract and format device updates.  Not to be run on jenkins.
+// TestLogDeviceUpdate is used to extract and format device updates.  Not to be run on jenkins.
 func TestLogDeviceUpdate(t *testing.T) {
 	t.Skip()
 	var inputFile = os.Getenv("LGF")
diff --git a/rw_core/test/utils.go b/rw_core/test/utils.go
index f2bf3e0..835d378 100644
--- a/rw_core/test/utils.go
+++ b/rw_core/test/utils.go
@@ -165,7 +165,7 @@
 	fmt.Println("RepeatedIDs", repeatedTrnsID, "TransID:", len(uniqueTnsIDs))
 }
 
-//CreateMockAdapter creates mock OLT and ONU adapters - this will automatically the grpc service hosted by that
+// CreateMockAdapter creates mock OLT and ONU adapters - this will automatically start the grpc service hosted by that
 // adapter
 func CreateMockAdapter(
 	ctx context.Context,
@@ -189,7 +189,7 @@
 	return adpt, nil
 }
 
-//CreateAndRegisterAdapters creates mock ONU and OLT adapters and registers them to rw-core
+// CreateAndRegisterAdapters creates mock ONU and OLT adapters and registers them to rw-core
 func CreateAndRegisterAdapters(
 	ctx context.Context,
 	t *testing.T,
@@ -282,7 +282,7 @@
 	return oltAdaptersMap, onuAdaptersMap
 }
 
-//StartEmbeddedEtcdServer creates and starts an Embedded etcd server locally.
+// StartEmbeddedEtcdServer creates and starts an Embedded etcd server locally.
 func StartEmbeddedEtcdServer(ctx context.Context, configName, storageDir, logLevel string) (*mock_etcd.EtcdServer, int, error) {
 	kvClientPort, err := freeport.GetFreePort()
 	if err != nil {
@@ -299,14 +299,14 @@
 	return etcdServer, kvClientPort, nil
 }
 
-//StopEmbeddedEtcdServer stops the embedded etcd server
+// StopEmbeddedEtcdServer stops the embedded etcd server
 func StopEmbeddedEtcdServer(ctx context.Context, server *mock_etcd.EtcdServer) {
 	if server != nil {
 		server.Stop(ctx)
 	}
 }
 
-//SetupKVClient creates a new etcd client
+// SetupKVClient creates a new etcd client
 func SetupKVClient(ctx context.Context, cf *config.RWCoreFlags, coreInstanceID string) kvstore.Client {
 	client, err := kvstore.NewEtcdClient(ctx, cf.KVStoreAddress, cf.KVStoreTimeout, log.FatalLevel)
 	if err != nil {
@@ -315,7 +315,7 @@
 	return client
 }
 
-//getRandomMacAddress returns a random mac address
+// getRandomMacAddress returns a random mac address
 func getRandomMacAddress() string {
 	rand.Seed(time.Now().UnixNano() / int64(rand.Intn(255)+1))
 	return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x",
diff --git a/rw_core/utils/core_utils.go b/rw_core/utils/core_utils.go
index 7e1eb87..4f639aa 100644
--- a/rw_core/utils/core_utils.go
+++ b/rw_core/utils/core_utils.go
@@ -108,11 +108,11 @@
 	}
 }
 
-//WaitForNilOrErrorResponses waits on a variadic number of channels for either a nil response or an error
-//response. If an error is received from a given channel then the returned error array will contain that error.
-//The error will be at the index corresponding to the order in which the channel appear in the parameter list.
-//If no errors is found then nil is returned.  This method also takes in a timeout in milliseconds. If a
-//timeout is obtained then this function will stop waiting for the remaining responses and abort.
+// WaitForNilOrErrorResponses waits on a variadic number of channels for either a nil response or an error
+// response. If an error is received from a given channel then the returned error array will contain that error.
+// The error will be at the index corresponding to the order in which the channels appear in the parameter list.
+// If no errors are found then nil is returned.  This method also takes in a timeout duration. If the
+// timeout expires then this function stops waiting for the remaining responses and aborts.
 func WaitForNilOrErrorResponses(timeout time.Duration, responses ...Response) []error {
 	timedOut := make(chan struct{})
 	timer := time.AfterFunc(timeout, func() { close(timedOut) })
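The rewritten comment above describes a fan-in pattern: wait on a variadic list of response channels, record each outcome at the channel's position, and stop waiting once a shared timeout fires. A minimal, self-contained sketch of that pattern, assuming plain error channels rather than the voltha-go Response type (all names here are illustrative only):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitAll collects one outcome per channel, preserving parameter order.
// A nil entry means success; otherwise the entry is the channel's error, or a
// timeout error if the shared deadline expired first.
func waitAll(timeout time.Duration, chans ...<-chan error) []error {
	timedOut := make(chan struct{})
	timer := time.AfterFunc(timeout, func() { close(timedOut) })
	defer timer.Stop()

	results := make([]error, len(chans))
	for i, ch := range chans {
		select {
		case err := <-ch:
			results[i] = err
		case <-timedOut:
			results[i] = errors.New("timeout waiting for response")
		}
	}
	// Match the documented contract: return nil when every response succeeded.
	for _, err := range results {
		if err != nil {
			return results
		}
	}
	return nil
}

func main() {
	ok := make(chan error, 1)
	bad := make(chan error, 1)
	ok <- nil
	bad <- errors.New("device unreachable")
	fmt.Println(waitAll(time.Second, ok, bad)) // [<nil> device unreachable]
}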