vgc: add unit tests, part 4

Change-Id: I0e78854fefb8f0ad270a84bc88982f859a0d5995
diff --git a/internal/pkg/application/application_test.go b/internal/pkg/application/application_test.go
index 15071e3..7d92230 100644
--- a/internal/pkg/application/application_test.go
+++ b/internal/pkg/application/application_test.go
@@ -22,6 +22,7 @@
 	"reflect"
 	"sync"
 	"testing"
+	"time"
 	"voltha-go-controller/internal/pkg/controller"
 	"voltha-go-controller/internal/pkg/intf"
 	"voltha-go-controller/internal/pkg/of"
@@ -2697,3 +2698,241 @@
 		})
 	}
 }
+
+func TestVoltApplication_DeleteMacInPortMap(t *testing.T) {
+	type args struct {
+		macAddr net.HardwareAddr
+	}
+
+	macAdd, _ := net.ParseMAC("ff:ff:ff:ff:ff:ff")
+	macPort := map[string]string{}
+	macPort[macAdd.String()] = test_data
+
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "Positive_Case_DeleteMacInPortMap",
+			args: args{
+				macAddr: macAdd,
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			va := &VoltApplication{
+				macPortMap: macPort,
+			}
+			va.DeleteMacInPortMap(tt.args.macAddr)
+		})
+	}
+}
+
+func TestVoltApplication_TriggerPendingServiceDeactivateReq(t *testing.T) {
+	type args struct {
+		cntx   context.Context
+		device string
+	}
+	ServicesDeactivate := map[string]bool{}
+	ServicesDeactivate["SDX6320031-1_SDX6320031-1-4096-2310-4096-65"] = true
+	voltServ := &VoltService{
+		VoltServiceOper: VoltServiceOper{
+			Device: "SDX6320031",
+		},
+		VoltServiceCfg: VoltServiceCfg{
+			Name:          "SDX6320031-1_SDX6320031-1-4096-2310-4096-65",
+			SVlan:         4096,
+			CVlan:         2310,
+			UniVlan:       4096,
+			Port:          "16777472",
+			TechProfileID: 65,
+		},
+	}
+
+	voltPortVnets := make([]*VoltPortVnet, 0)
+	voltPortVnet := &VoltPortVnet{
+		Device:           "SDX6320031",
+		Port:             "16777472",
+		DeleteInProgress: false,
+		services:         sync.Map{},
+		SVlan:            4096,
+		CVlan:            2310,
+		UniVlan:          4096,
+		SVlanTpid:        65,
+		servicesCount:    atomic.NewUint64(1),
+	}
+
+	voltPortVnets = append(voltPortVnets, voltPortVnet)
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "Positive_Case_TriggerPendingServiceDeactivateReq",
+			args: args{
+				cntx:   context.Background(),
+				device: "SDX6320031",
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			va := &VoltApplication{
+				ServicesToDeactivate: ServicesDeactivate,
+				ServiceByName:        sync.Map{},
+				VnetsByPort:          sync.Map{},
+			}
+			va.ServiceByName.Store("SDX6320031-1_SDX6320031-1-4096-2310-4096-65", voltServ)
+			va.VnetsByPort.Store("16777472", voltPortVnets)
+			dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+			db = dbintf
+			dbintf.EXPECT().PutService(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+			dbintf.EXPECT().PutVpv(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1)
+			va.TriggerPendingServiceDeactivateReq(tt.args.cntx, tt.args.device)
+		})
+	}
+}
+
+func TestVoltApplication_ReadAllFromDb(t *testing.T) {
+	type args struct {
+		cntx context.Context
+	}
+
+	migrationInfo := "migration done"
+	deviceConfig := DeviceConfig{
+		SerialNumber:       "SDX6320031",
+		UplinkPort:         "16777472",
+		HardwareIdentifier: "0.0.0.0",
+		IPAddress:          "127.26.1.74",
+		NasID:              "12345",
+		NniDhcpTrapVid:     123,
+	}
+
+	voltVnet := &VoltVnet{
+		Version: "v3",
+		VnetConfig: VnetConfig{
+			Name:      "2310-4096-4096",
+			VnetType:  "Encapsulation",
+			SVlan:     2310,
+			CVlan:     4096,
+			UniVlan:   4096,
+			SVlanTpid: 33024,
+		},
+
+		VnetOper: VnetOper{
+			PendingDeviceToDelete: "SDX6320031",
+			DeleteInProgress:      true,
+		},
+	}
+
+	concurrentMap := &util.ConcurrentMap{
+		Count: atomic.NewUint64(0),
+	}
+
+	vnetToDelete := map[string]bool{}
+	vnetToDelete["2310-4096-4096"] = true
+	macAdd, _ := net.ParseMAC("ff:ff:ff:ff:ff:ff")
+	voltPortVnet := &VoltPortVnet{
+		Device:           "SDX6320031",
+		Port:             "16777472",
+		DeleteInProgress: true,
+		MacAddr:          macAdd,
+	}
+
+	macPortMap := map[string]string{}
+	voltPortVnetsToDelete := map[*VoltPortVnet]bool{}
+	voltPortVnetsToDelete[voltPortVnet] = true
+	macPortMap[macAdd.String()] = "16777472"
+
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "Positive_Case_ReadAllFromDb",
+			args: args{
+				cntx: context.Background(),
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			va := &VoltApplication{
+				VnetsBySvlan:          util.NewConcurrentMap(),
+				VnetsToDelete:         vnetToDelete,
+				macPortMap:            macPortMap,
+				VoltPortVnetsToDelete: voltPortVnetsToDelete,
+				VnetsByName:           sync.Map{},
+			}
+
+			dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+			db = dbintf
+			dbintf.EXPECT().GetMeters(gomock.Any()).AnyTimes()
+			vnet, _ := json.Marshal(voltVnet)
+			voltVnets := map[string]*kvstore.KVPair{}
+			voltVnets["2310-4096-4096"] = &kvstore.KVPair{
+				Key:   "2310-4096-4096",
+				Value: vnet,
+			}
+
+			va.VnetsBySvlan.Set(of.VlanAny, concurrentMap)
+			dbintf.EXPECT().GetVnets(gomock.Any()).Return(voltVnets, nil).AnyTimes()
+			dbintf.EXPECT().PutVnet(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+			vpvs, _ := json.Marshal(voltPortVnet)
+			voltPort := map[string]*kvstore.KVPair{}
+			voltPort["16777472"] = &kvstore.KVPair{
+				Key:   "16777472",
+				Value: vpvs,
+			}
+			va.VnetsByName.Store("2310-4096-4096", voltVnet)
+			dbintf.EXPECT().GetVpvs(gomock.Any()).Return(voltPort, nil).AnyTimes()
+			dbintf.EXPECT().GetServices(gomock.Any()).AnyTimes()
+			dbintf.EXPECT().GetMvlans(gomock.Any()).AnyTimes()
+			dbintf.EXPECT().GetIgmpProfiles(gomock.Any()).AnyTimes()
+			dbintf.EXPECT().GetMcastConfigs(gomock.Any()).AnyTimes()
+			dbintf.EXPECT().GetIgmpGroups(gomock.Any()).AnyTimes()
+			dbintf.EXPECT().GetMigrationInfo(gomock.Any()).Return(migrationInfo, nil).AnyTimes()
+			dbintf.EXPECT().GetOltFlowService(gomock.Any()).AnyTimes()
+			b, _ := json.Marshal(deviceConfig)
+			test := map[string]*kvstore.KVPair{}
+			test["SDX6320031"] = &kvstore.KVPair{
+				Key:   "SDX6320031",
+				Value: b,
+			}
+			dbintf.EXPECT().GetDeviceConfig(gomock.Any()).Return(test, nil).AnyTimes()
+			dbintf.EXPECT().PutDeviceConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+			va.ReadAllFromDb(tt.args.cntx)
+		})
+	}
+}
+
+func TestVoltApplication_RemoveGroupDevicesFromPendingPool(t *testing.T) {
+	type args struct {
+		ig *IgmpGroup
+	}
+	pendingGroupForDevice := map[string]time.Time{}
+	pendingGroupForDevice[test_device] = time.Now()
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "VoltApplication_RemoveGroupDevicesFromPendingPool",
+			args: args{
+				ig: &IgmpGroup{
+					Version:               "test_version",
+					PendingGroupForDevice: pendingGroupForDevice,
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			va := &VoltApplication{}
+			va.RemoveGroupDevicesFromPendingPool(tt.args.ig)
+		})
+	}
+}
diff --git a/internal/pkg/application/dhcprelay_test.go b/internal/pkg/application/dhcprelay_test.go
new file mode 100644
index 0000000..2354d41
--- /dev/null
+++ b/internal/pkg/application/dhcprelay_test.go
@@ -0,0 +1,637 @@
+/*
+* Copyright 2022-present Open Networking Foundation
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package application
+
+import (
+	"context"
+	"encoding/json"
+	"net"
+	"reflect"
+	"sync"
+	"testing"
+	"voltha-go-controller/internal/pkg/of"
+	"voltha-go-controller/internal/test/mocks"
+
+	"github.com/golang/mock/gomock"
+	"github.com/google/gopacket"
+	"github.com/google/gopacket/layers"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestVoltApplication_GetIgnoredPorts(t *testing.T) {
+	voltDevice := &VoltDevice{
+		Name:         "11c3175b-50f3-4220-9555-93df733ded1d",
+		SerialNum:    "SDX6320031",
+		SouthBoundID: "68580342-6b3e-57cb-9ea4-06125594e330",
+		NniPort:      "16777472",
+		Ports:        sync.Map{},
+		PonPortList:  sync.Map{},
+	}
+	voltPort := &VoltPort{
+		Name:                     "16777472",
+		Device:                   "SDX6320031",
+		ID:                       16777472,
+		State:                    PortStateDown,
+		ChannelPerSubAlarmRaised: false,
+		Type:                     VoltPortTypeNni,
+	}
+	voltPortVnets := make([]*VoltPortVnet, 0)
+	voltPortVnet := &VoltPortVnet{
+		Device:      "SDX6320031",
+		Port:        "16777472",
+		MacLearning: MacLearningNone,
+	}
+	voltPortVnets = append(voltPortVnets, voltPortVnet)
+	IgnoredPorts := make(map[string][]string)
+	IgnoredPorts["SDX6320031"] = append(IgnoredPorts["SDX6320031"], "16777472")
+	tests := []struct {
+		name    string
+		want    map[string][]string
+		wantErr bool
+	}{
+		{
+			name:    "Positive_Case_GetIgnoredPorts",
+			want:    IgnoredPorts,
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			va := &VoltApplication{
+				DevicesDisc: sync.Map{},
+			}
+			va.DevicesDisc.Store("SDX6320031", voltDevice)
+			voltDevice.Ports.Store("16777472", voltPort)
+			voltApp := GetApplication()
+			voltApp.VnetsByPort.Store("16777472", voltPortVnets)
+			got, err := va.GetIgnoredPorts()
+			if (err != nil) != tt.wantErr {
+				t.Errorf("VoltApplication.GetIgnoredPorts() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("VoltApplication.GetIgnoredPorts() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestDhcpNetworks_AddDhcpSession(t *testing.T) {
+	pkt := mocks.NewMockPacket(gomock.NewController(t))
+	type args struct {
+		pkt     gopacket.Packet
+		session IDhcpRelaySession
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "DhcpNetworks_AddDhcpSession",
+			args: args{
+				pkt:     pkt,
+				session: &VoltPortVnet{},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			network := make(map[uint32]*DhcpRelayVnet)
+			dn := &DhcpNetworks{
+				Networks: network,
+			}
+			pkt.EXPECT().Layer(layers.LayerTypeEthernet).Return(eth).Times(1)
+			if err := dn.AddDhcpSession(tt.args.pkt, tt.args.session); (err != nil) != tt.wantErr {
+				t.Errorf("DhcpNetworks.AddDhcpSession() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestDhcpNetworks_DelDhcpSession(t *testing.T) {
+	pkt := mocks.NewMockPacket(gomock.NewController(t))
+	type args struct {
+		pkt     gopacket.Packet
+		session IDhcpRelaySession
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "DhcpNetworks_DelDhcpSession",
+			args: args{
+				pkt:     pkt,
+				session: &VoltPortVnet{},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			network := make(map[uint32]*DhcpRelayVnet)
+			dn := &DhcpNetworks{
+				Networks: network,
+			}
+			pkt.EXPECT().Layer(layers.LayerTypeEthernet).Return(eth).Times(1)
+			dn.DelDhcpSession(tt.args.pkt, tt.args.session)
+		})
+	}
+}
+
+func TestDhcpNetworks_AddDhcp6Session(t *testing.T) {
+	type args struct {
+		key     [MaxLenDhcpv6DUID]byte
+		session IDhcpRelaySession
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "DhcpNetworks_AddDhcp6Session",
+			args: args{
+				session: &VoltPortVnet{},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			network := make(map[uint32]*DhcpRelayVnet)
+			dn := &DhcpNetworks{
+				Networks: network,
+			}
+			if err := dn.AddDhcp6Session(tt.args.key, tt.args.session); (err != nil) != tt.wantErr {
+				t.Errorf("DhcpNetworks.AddDhcp6Session() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestDhcpNetworks_DelDhcp6Session(t *testing.T) {
+	type args struct {
+		key     [MaxLenDhcpv6DUID]byte
+		session IDhcpRelaySession
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "DhcpNetworks_DelDhcp6Session",
+			args: args{
+				session: &VoltPortVnet{},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			network := make(map[uint32]*DhcpRelayVnet)
+			network[uint32(4097)] = &DhcpRelayVnet{
+				InnerVlan: uint16(4097),
+			}
+			dn := &DhcpNetworks{
+				Networks: network,
+			}
+			dn.DelDhcp6Session(tt.args.key, tt.args.session)
+		})
+	}
+}
+
+func TestDhcpNetworks_GetDhcpSession(t *testing.T) {
+	type fields struct {
+		Networks map[uint32]*DhcpRelayVnet
+	}
+	type args struct {
+		outerVlan uint16
+		innerVlan uint16
+		addr      net.HardwareAddr
+	}
+	macAdd, _ := net.ParseMAC("ff:ff:ff:ff:ff:ff")
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   IDhcpRelaySession
+	}{
+		{
+			name: "DhcpNetworks_GetDhcpSession",
+			args: args{
+				outerVlan: uint16(0),
+				innerVlan: uint16(4097),
+				addr:      macAdd,
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			network := make(map[uint32]*DhcpRelayVnet)
+			network[uint32(4097)] = &DhcpRelayVnet{
+				InnerVlan: uint16(4097),
+			}
+			dn := &DhcpNetworks{
+				Networks: network,
+			}
+			got, err := dn.GetDhcpSession(tt.args.outerVlan, tt.args.innerVlan, tt.args.addr)
+			assert.NotNil(t, err)
+			assert.Nil(t, got)
+		})
+	}
+}
+
+func TestDhcpNetworks_GetDhcp6Session(t *testing.T) {
+	type fields struct {
+		Networks map[uint32]*DhcpRelayVnet
+	}
+	type args struct {
+		outerVlan uint16
+		innerVlan uint16
+		key       [MaxLenDhcpv6DUID]byte
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		args    args
+		want    IDhcpRelaySession
+		wantErr bool
+	}{
+		{
+			name: "DhcpNetworks_GetDhcp6Session",
+			args: args{
+				outerVlan: uint16(0),
+				innerVlan: uint16(4097),
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			network := make(map[uint32]*DhcpRelayVnet)
+			network[uint32(4097)] = &DhcpRelayVnet{
+				InnerVlan: uint16(4097),
+			}
+			dn := &DhcpNetworks{
+				Networks: network,
+			}
+			got, err := dn.GetDhcp6Session(tt.args.outerVlan, tt.args.innerVlan, tt.args.key)
+			assert.NotNil(t, err)
+			assert.Nil(t, got)
+		})
+	}
+}
+
+func TestGetVnetForV4Nni(t *testing.T) {
+	type args struct {
+		dhcp  *layers.DHCPv4
+		cvlan of.VlanType
+		svlan of.VlanType
+		pbit  uint8
+	}
+	macAdd, _ := net.ParseMAC("ff:ff:ff:ff:ff:ff")
+	tests := []struct {
+		name    string
+		args    args
+		want    []*VoltPortVnet
+		wantErr bool
+	}{
+		{
+			name: "GetVnetForV4Nni",
+			args: args{
+				cvlan: of.VlanAny,
+				svlan: of.VlanAny,
+				dhcp: &layers.DHCPv4{
+					BaseLayer:    dot1Q.BaseLayer,
+					ClientHWAddr: macAdd,
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := GetVnetForV4Nni(tt.args.dhcp, tt.args.cvlan, tt.args.svlan, tt.args.pbit)
+			assert.NotNil(t, err)
+			assert.Nil(t, got)
+		})
+	}
+}
+
+func TestGetVnetForV6Nni(t *testing.T) {
+	type args struct {
+		dhcp      *layers.DHCPv6
+		cvlan     of.VlanType
+		svlan     of.VlanType
+		pbit      uint8
+		clientMAC net.HardwareAddr
+	}
+	macAdd, _ := net.ParseMAC("ff:ff:ff:ff:ff:ff")
+	tests := []struct {
+		name    string
+		args    args
+		want    []*VoltPortVnet
+		want1   net.HardwareAddr
+		wantErr bool
+	}{
+		{
+			name: "GetVnetForV6Nni",
+			args: args{
+				dhcp: &layers.DHCPv6{
+					BaseLayer: dot1Q.BaseLayer,
+					Options: layers.DHCPv6Options{
+						{
+							Code: layers.DHCPv6OptClientID,
+							Data: []byte{2, 3, 4, 2, 3, 4, 2, 3, 4},
+						},
+					},
+				},
+				cvlan:     of.VlanAny,
+				svlan:     of.VlanAny,
+				clientMAC: macAdd,
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, got1, err := GetVnetForV6Nni(tt.args.dhcp, tt.args.cvlan, tt.args.svlan, tt.args.pbit, tt.args.clientMAC)
+			assert.NotNil(t, err)
+			assert.Nil(t, got)
+			assert.NotNil(t, got1)
+		})
+	}
+}
+
+func TestAddDhcpv4Option82(t *testing.T) {
+	type args struct {
+		svc    *VoltService
+		rID    []byte
+		dhcpv4 *layers.DHCPv4
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "AddDhcpv4Option82",
+			args: args{
+				svc: &VoltService{
+					VoltServiceCfg: VoltServiceCfg{
+						CircuitID:    "test_circuit_id",
+						DataRateAttr: DSLAttrEnabled,
+					},
+				},
+				rID: []byte{1},
+				dhcpv4: &layers.DHCPv4{
+					Options: layers.DHCPOptions{
+						{
+							Type: layers.DHCPOptARPTimeout,
+						},
+					},
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			AddDhcpv4Option82(tt.args.svc, tt.args.rID, tt.args.dhcpv4)
+		})
+	}
+}
+
+func TestVoltApplication_ProcessDsDhcpv4Packet(t *testing.T) {
+	pkt := mocks.NewMockPacket(gomock.NewController(t))
+	type args struct {
+		cntx   context.Context
+		device string
+		port   string
+		pkt    gopacket.Packet
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "VoltApplication_ProcessDsDhcpv4Packet",
+			args: args{
+				cntx:   context.Background(),
+				device: test_device,
+				port:   "test_port",
+				pkt:    pkt,
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			va := &VoltApplication{}
+			iPv4 := &layers.IPv4{
+				Version: uint8(1),
+			}
+			uDP := &layers.UDP{
+				Length: uint16(1),
+			}
+			dHCPv4 := &layers.DHCPv4{
+				HardwareLen: uint8(1),
+			}
+			dot1QTest := &layers.Dot1Q{
+				Priority: uint8(1),
+			}
+			pkt.EXPECT().Layer(layers.LayerTypeEthernet).Return(eth).Times(1)
+			pkt.EXPECT().Layer(layers.LayerTypeIPv4).Return(iPv4).Times(1)
+			pkt.EXPECT().Layer(layers.LayerTypeUDP).Return(uDP).Times(1)
+			pkt.EXPECT().Layer(layers.LayerTypeDHCPv4).Return(dHCPv4).Times(1)
+			pkt.EXPECT().Layer(layers.LayerTypeDot1Q).Return(dot1QTest).Times(1)
+			pkt.EXPECT().Layers().Return(LayerTypeDot2Q).Times(1)
+			va.ProcessDsDhcpv4Packet(tt.args.cntx, tt.args.device, tt.args.port, tt.args.pkt)
+		})
+	}
+}
+
+func TestDelOption82(t *testing.T) {
+	type args struct {
+		dhcpv4 *layers.DHCPv4
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "DelOption82",
+			args: args{
+				dhcpv4: &layers.DHCPv4{
+					Options: layers.DHCPOptions{
+						{
+							Type: opt82,
+						},
+					},
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			DelOption82(tt.args.dhcpv4)
+		})
+	}
+}
+
+func TestDhcpMsgType(t *testing.T) {
+	type args struct {
+		dhcp *layers.DHCPv4
+	}
+	tests := []struct {
+		name string
+		args args
+		want layers.DHCPMsgType
+	}{
+		{
+			name: "DhcpMsgType",
+			args: args{
+				dhcp: &layers.DHCPv4{
+					Options: layers.DHCPOptions{
+						{
+							Type: layers.DHCPOptMessageType,
+							Data: []byte{1},
+						},
+					},
+				},
+			},
+			want: layers.DHCPMsgTypeDiscover,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := DhcpMsgType(tt.args.dhcp); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("DhcpMsgType() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestGetIpv4Addr(t *testing.T) {
+	type args struct {
+		dhcp *layers.DHCPv4
+	}
+	tests := []struct {
+		name  string
+		args  args
+		want  net.IP
+		want1 int64
+	}{
+		{
+			name: "GetIpv4Addr",
+			args: args{
+				dhcp: &layers.DHCPv4{
+					Options: layers.DHCPOptions{
+						{
+							Type: layers.DHCPOptLeaseTime,
+							Data: []byte{1, 2, 3, 4, 5},
+						},
+					},
+				},
+			},
+			want1: int64(16909060),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, got1 := GetIpv4Addr(tt.args.dhcp)
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("GetIpv4Addr() got = %v, want %v", got, tt.want)
+			}
+			if got1 != tt.want1 {
+				t.Errorf("GetIpv4Addr() got1 = %v, want %v", got1, tt.want1)
+			}
+		})
+	}
+}
+
+func TestGetIpv6Addr(t *testing.T) {
+	type args struct {
+		dhcp6 *layers.DHCPv6
+	}
+	b, err := json.Marshal(layers.DHCPv6OptIAAddr)
+	if err != nil {
+		panic(err)
+	}
+	tests := []struct {
+		name  string
+		args  args
+		want  net.IP
+		want1 uint32
+	}{
+		{
+			name: "GetIpv6Addr_error",
+			args: args{
+				dhcp6: &layers.DHCPv6{
+					MsgType: layers.DHCPv6MsgTypeReply,
+					Options: layers.DHCPv6Options{
+						{
+							Code: layers.DHCPv6OptIANA,
+							Data: b,
+						},
+					},
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, got1 := GetIpv6Addr(tt.args.dhcp6)
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("GetIpv6Addr() got = %v, want %v", got, tt.want)
+			}
+			if got1 != tt.want1 {
+				t.Errorf("GetIpv6Addr() got1 = %v, want %v", got1, tt.want1)
+			}
+		})
+	}
+}
+
+func TestVoltApplication_GetMacLearnerInfo(t *testing.T) {
+	type args struct {
+		cntx       context.Context
+		deviceID   string
+		portNumber string
+		vlanID     string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    MacLearnerInfo
+		wantErr bool
+	}{
+		{
+			name: "VoltApplication_GetMacLearnerInfo",
+			args: args{
+				cntx:       context.Background(),
+				deviceID:   test_device,
+				portNumber: "test_port_number",
+				vlanID:     "test_vlanID",
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			va := &VoltApplication{}
+			got, err := va.GetMacLearnerInfo(tt.args.cntx, tt.args.deviceID, tt.args.portNumber, tt.args.vlanID)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("VoltApplication.GetMacLearnerInfo() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("VoltApplication.GetMacLearnerInfo() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/internal/pkg/application/flowevent_test.go b/internal/pkg/application/flowevent_test.go
index 400176a..b6b8738 100644
--- a/internal/pkg/application/flowevent_test.go
+++ b/internal/pkg/application/flowevent_test.go
@@ -132,6 +132,21 @@
 				},
 			},
 		},
+		{
+			name: "ProcessUsIgmpFlowAddEvent_else_condition",
+			args: args{
+				cntx: context.Background(),
+				event: &FlowEvent{
+					device:    "test_device",
+					eType:     EventTypeControlFlowAdded,
+					eventData: voltPortVnet,
+				},
+				flowStatus: intf.FlowStatus{
+					Device: "test_device",
+					Status: uint32(1001),
+				},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -161,6 +176,19 @@
 				},
 			},
 		},
+		{
+			name: "ProcessServiceFlowAddEvent_else_condition",
+			args: args{
+				cntx: context.Background(),
+				event: &FlowEvent{
+					device:    "test_device",
+					eventData: voltService,
+				},
+				flowStatus: intf.FlowStatus{
+					Status: uint32(1001),
+				},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -188,6 +216,18 @@
 				},
 			},
 		},
+		{
+			name: "ProcessControlFlowAddEvent_else_condition",
+			args: args{
+				cntx: context.Background(),
+				event: &FlowEvent{
+					eventData: voltPortVnet,
+				},
+				flowStatus: intf.FlowStatus{
+					Status: uint32(1001),
+				},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -215,6 +255,18 @@
 				},
 			},
 		},
+		{
+			name: "ProcessServiceFlowDelEvent_else_condition",
+			args: args{
+				cntx: context.Background(),
+				event: &FlowEvent{
+					eventData: voltService,
+				},
+				flowStatus: intf.FlowStatus{
+					Status: uint32(1001),
+				},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -245,6 +297,18 @@
 				},
 			},
 		},
+		{
+			name: "ProcessControlFlowDelEvent_else_condition",
+			args: args{
+				cntx: context.Background(),
+				event: &FlowEvent{
+					eventData: voltPortVnet,
+				},
+				flowStatus: intf.FlowStatus{
+					Status: uint32(1001),
+				},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -278,6 +342,18 @@
 				},
 			},
 		},
+		{
+			name: "ProcessMcastFlowDelEvent_else_condition",
+			args: args{
+				cntx: context.Background(),
+				event: &FlowEvent{
+					eventData: mvlanProfile,
+				},
+				flowStatus: intf.FlowStatus{
+					Status: uint32(1001),
+				},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -288,3 +364,56 @@
 		})
 	}
 }
+
+func TestProcessDeviceFlowDelEvent(t *testing.T) {
+	type args struct {
+		cntx       context.Context
+		event      *FlowEvent
+		flowStatus intf.FlowStatus
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "ProcessDeviceFlowDelEvent",
+			args: args{
+				cntx: context.Background(),
+				event: &FlowEvent{
+					device:    test_device,
+					eventData: voltVnet,
+				},
+				flowStatus: intf.FlowStatus{
+					Device: test_device,
+				},
+			},
+		},
+		{
+			name: "ProcessDeviceFlowDelEvent_else_condition",
+			args: args{
+				cntx: context.Background(),
+				event: &FlowEvent{
+					device:    test_device,
+					eventData: voltVnet,
+				},
+				flowStatus: intf.FlowStatus{
+					Device: test_device,
+					Status: uint32(1001),
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			switch tt.name {
+			case "ProcessDeviceFlowDelEvent":
+				dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+				db = dbintf
+				dbintf.EXPECT().PutVnet(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+				ProcessDeviceFlowDelEvent(tt.args.cntx, tt.args.event, tt.args.flowStatus)
+			case "ProcessDeviceFlowDelEvent_else_condition":
+				ProcessDeviceFlowDelEvent(tt.args.cntx, tt.args.event, tt.args.flowStatus)
+			}
+		})
+	}
+}
diff --git a/internal/pkg/application/major_upgrade_test.go b/internal/pkg/application/major_upgrade_test.go
new file mode 100644
index 0000000..47e1f45
--- /dev/null
+++ b/internal/pkg/application/major_upgrade_test.go
@@ -0,0 +1,1038 @@
+/*
+* Copyright 2022-present Open Networking Foundation
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+package application
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"reflect"
+	"testing"
+	"voltha-go-controller/internal/test/mocks"
+
+	"github.com/golang/mock/gomock"
+	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
+)
+
+func TestDeleteDbPathKeys(t *testing.T) {
+	type args struct {
+		cntx    context.Context
+		keyPath string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "Positive_Case_DeleteDbPathKeys",
+			args: args{
+				cntx:    context.Background(),
+				keyPath: "test_key",
+			},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+			db = dbintf
+			dbintf.EXPECT().DeleteAll(gomock.Any(), gomock.Any()).AnyTimes()
+			if err := DeleteDbPathKeys(tt.args.cntx, tt.args.keyPath); (err != nil) != tt.wantErr {
+				t.Errorf("DeleteDbPathKeys() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestMigrateVnets(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	voltVnet_test := &VoltVnet{
+		Version: "v3",
+		VnetConfig: VnetConfig{
+			Name:      "2310-4096-4096",
+			VnetType:  "Encapsulation",
+			SVlan:     2310,
+			CVlan:     4096,
+			UniVlan:   4096,
+			SVlanTpid: 0,
+			DhcpRelay: true,
+		},
+		VnetOper: VnetOper{
+			PendingDeviceToDelete: "SDX63200313",
+		},
+	}
+
+	byteData, _ := json.Marshal(voltVnet_test)
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateVnets",
+			args: args{
+				cntx: context.Background(),
+				data: byteData,
+			},
+			want: string(byteData),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateVnets(tt.args.cntx, tt.args.data); reflect.DeepEqual(got, tt.want) {
+				t.Errorf("MigrateVnets() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateServices(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	vsmap := make(map[string]interface{})
+	vsmap["MecLearning"] = true
+	byteData, _ := json.Marshal(&vsmap)
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateServices",
+			args: args{
+				cntx: context.Background(),
+				data: byteData,
+			},
+			want: string(byteData),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateServices(tt.args.cntx, tt.args.data); reflect.DeepEqual(got, tt.want) {
+				t.Errorf("MigrateServices() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateVpvs(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	vpvmap := make(map[string]interface{})
+	byteData, _ := json.Marshal(&vpvmap)
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateVpvs",
+			args: args{
+				cntx: context.Background(),
+				data: byteData,
+			},
+			want: string(byteData),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateVpvs(tt.args.cntx, tt.args.data); reflect.DeepEqual(got, tt.want) {
+				t.Errorf("MigrateVpvs() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateMvlans(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	devicesList := make(map[string]OperInProgress)
+	devicesList["SDX6320031"] = opt82
+	mvp := &MvlanProfile{
+		DevicesList: devicesList,
+	}
+	byteData, _ := json.Marshal(mvp)
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateMvlans",
+			args: args{
+				cntx: context.Background(),
+				data: byteData,
+			},
+			want: string(byteData),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateMvlans(tt.args.cntx, tt.args.data); reflect.DeepEqual(got, tt.want) {
+				t.Errorf("MigrateMvlans() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateIgmpConfs(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	igmpProfile_data := IgmpProfile{
+		ProfileID: "test_profile_id",
+	}
+	b, err := json.Marshal(igmpProfile_data)
+	if err != nil {
+		panic(err)
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "test_MigrateIgmpConfs",
+			args: args{
+				cntx: context.Background(),
+				data: b,
+			},
+			want: "ModuleToBeDeleted",
+		},
+		{
+			name: "unmarshal error",
+			args: args{
+				cntx: context.Background(),
+				data: []byte{},
+			},
+		},
+		{
+			name: "WriteToDb_error",
+			args: args{
+				cntx: context.Background(),
+				data: b,
+			},
+			want: "ModuleToBeDeleted",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			switch tt.name {
+			case "test_MigrateIgmpConfs":
+				dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+				db = dbintf
+				dbintf.EXPECT().PutIgmpProfile(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+				if got := MigrateIgmpConfs(tt.args.cntx, tt.args.data); got != tt.want {
+					t.Errorf("MigrateIgmpConfs() = %v, want %v", got, tt.want)
+				}
+			case "unmarshal error":
+				if got := MigrateIgmpConfs(tt.args.cntx, tt.args.data); got != tt.want {
+					t.Errorf("MigrateIgmpConfs() = %v, want %v", got, tt.want)
+				}
+			case "WriteToDb_error":
+				dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+				db = dbintf
+				dbintf.EXPECT().PutIgmpProfile(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("error"))
+				if got := MigrateIgmpConfs(tt.args.cntx, tt.args.data); got != tt.want {
+					t.Errorf("MigrateIgmpConfs() = %v, want %v", got, tt.want)
+				}
+			}
+		})
+	}
+}
+
+func TestMigrateIgmpGroups(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateIgmpGroups",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateIgmpGroups(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateIgmpGroups() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateIgmpDevices(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateIgmpDevices",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateIgmpDevices(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateIgmpDevices() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateIgmpChannels(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateIgmpChannels",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateIgmpChannels(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateIgmpChannels() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateIgmpPorts(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateIgmpPorts",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateIgmpPorts(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateIgmpPorts() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateIgmpProfs(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateIgmpProfs",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateIgmpProfs(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateIgmpProfs() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateMcastConfs(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateMcastConfs",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateMcastConfs(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateMcastConfs() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateLogLevels(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateLogLevels",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateLogLevels(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateLogLevels() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateHealth(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateHealth",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateHealth(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateHealth() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigratePonCounters(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigratePonCounters",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigratePonCounters(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigratePonCounters() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateChannelCounters(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateChannelCounters",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateChannelCounters(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateChannelCounters() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateServiceCounters(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateServiceCounters",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateServiceCounters(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateServiceCounters() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateNbDevices(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateNbDevices",
+			args: args{
+				cntx: context.Background(),
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateNbDevices(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateNbDevices() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateFlowHash(t *testing.T) {
+	type args struct {
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateFlowHash",
+			args: args{
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateFlowHash(tt.args.data); got != tt.want {
+				t.Errorf("MigrateFlowHash() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateMeters(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateMeters",
+			args: args{
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateMeters(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateMeters() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateDevices(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateDevices",
+			args: args{
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateDevices(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateDevices() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateDevicePorts(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateDevicePorts",
+			args: args{
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateDevicePorts(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateDevicePorts() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateDeviceFlows(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateDeviceFlows",
+			args: args{
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateDeviceFlows(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateDeviceFlows() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateDeviceGroups(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateDeviceGroups",
+			args: args{
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateDeviceGroups(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateDeviceGroups() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateDeviceMeters(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateDeviceMeters",
+			args: args{
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateDeviceMeters(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateDeviceMeters() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMigrateDeviceFlowHash(t *testing.T) {
+	type args struct {
+		cntx context.Context
+		data []byte
+	}
+	data := []byte{}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Positive_Case_MigrateDeviceFlowHash",
+			args: args{
+				data: data,
+			},
+			want: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MigrateDeviceFlowHash(tt.args.cntx, tt.args.data); got != tt.want {
+				t.Errorf("MigrateDeviceFlowHash() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestFetchAndMigrateDeviceDBData(t *testing.T) {
+	type args struct {
+		module string
+	}
+	var module string
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "Positive_Case_FetchAndMigrateDeviceDBData",
+			args: args{
+				module: module,
+			},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := FetchAndMigrateDeviceDBData(tt.args.module); (err != nil) != tt.wantErr {
+				t.Errorf("FetchAndMigrateDeviceDBData() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestDataMigration_WriteToDb(t *testing.T) {
+	type args struct {
+		cntx context.Context
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "Positive_Case_WriteToDb",
+			args: args{
+				cntx: context.Background(),
+			},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			md := &DataMigration{}
+			dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+			db = dbintf
+			dbintf.EXPECT().PutMigrationInfo(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+			if err := md.WriteToDb(tt.args.cntx); (err != nil) != tt.wantErr {
+				t.Errorf("DataMigration.WriteToDb() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestGetMigrationInfo(t *testing.T) {
+	type args struct {
+		cntx   context.Context
+		dmInfo *DataMigration
+	}
+	dmInfo := &DataMigration{
+		Version: "v1",
+		Status:  "done",
+	}
+	dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+	db = dbintf
+	dbintf.EXPECT().GetMigrationInfo(gomock.Any()).Return("migrationInfo", nil).AnyTimes()
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "Positive_Case_GetMigrationInfo",
+			args: args{
+				cntx:   context.Background(),
+				dmInfo: dmInfo,
+			},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := GetMigrationInfo(tt.args.cntx, tt.args.dmInfo); (err != nil) != tt.wantErr {
+				t.Errorf("GetMigrationInfo() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestCheckIfMigrationRequired(t *testing.T) {
+	type args struct {
+		ctx context.Context
+	}
+
+	dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+	db = dbintf
+	dbintf.EXPECT().GetMigrationInfo(gomock.Any()).Return("Migration_Info", nil).AnyTimes()
+	dbintf.EXPECT().PutMigrationInfo(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "Positive_Case_CheckIfMigrationRequired",
+			args: args{
+				ctx: context.Background(),
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := CheckIfMigrationRequired(tt.args.ctx); got != tt.want {
+				t.Errorf("CheckIfMigrationRequired() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestDataMigration_DelFromDb(t *testing.T) {
+	type args struct {
+		cntx context.Context
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "Positive_Case_DelFromDb",
+			args: args{
+				cntx: context.Background(),
+			},
+		},
+		{
+			name: "Negative_Case_DelFromDb",
+			args: args{
+				cntx: context.Background(),
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			md := &DataMigration{}
+			switch tt.name {
+			case "Positive_Case_DelFromDb":
+				dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+				db = dbintf
+				dbintf.EXPECT().DelMigrationInfo(gomock.Any()).Return(nil).AnyTimes()
+			case "Negative_Case_DelFromDb":
+				myError := errors.New("WRONG MESSAGE")
+				dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+				db = dbintf
+				dbintf.EXPECT().DelMigrationInfo(gomock.Any()).Return(myError).AnyTimes()
+			}
+			md.DelFromDb(tt.args.cntx)
+		})
+	}
+}
+
+func TestMigrateDBData(t *testing.T) {
+	type args struct {
+		cntx context.Context
+	}
+	byteArr := []byte{23}
+	dbPathKeysValueMap := map[string]*kvstore.KVPair{}
+	dbPathKeysValueMap["devices/%s/flows/"] = &kvstore.KVPair{
+		Key:   "devices/%s/flows/",
+		Value: byteArr,
+	}
+
+	dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+	db = dbintf
+	dbintf.EXPECT().List(gomock.Any(), gomock.Any()).Return(dbPathKeysValueMap, nil).AnyTimes()
+
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "Positive_Case_MigrateDBData",
+			args: args{
+				cntx: context.Background(),
+			},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := MigrateDBData(tt.args.cntx); (err != nil) != tt.wantErr {
+				t.Errorf("MigrateDBData() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
diff --git a/internal/pkg/application/pppoeia_test.go b/internal/pkg/application/pppoeia_test.go
index 45b76fa..fc07d1f 100644
--- a/internal/pkg/application/pppoeia_test.go
+++ b/internal/pkg/application/pppoeia_test.go
@@ -264,3 +264,35 @@
 		})
 	}
 }
+
+func TestAddIaOption(t *testing.T) {
+	type args struct {
+		svc   *VoltService
+		pppoe *layers.PPPoE
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "AddIaOption",
+			args: args{
+				svc: &VoltService{
+					VoltServiceCfg: VoltServiceCfg{
+						CircuitID:    "test_circuit_id",
+						RemoteID:     []byte{1},
+						DataRateAttr: DSLAttrEnabled,
+					},
+				},
+				pppoe: &layers.PPPoE{
+					Options: make(layers.PPPoEOptions, 1),
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			AddIaOption(tt.args.svc, tt.args.pppoe)
+		})
+	}
+}
diff --git a/internal/pkg/application/service_test.go b/internal/pkg/application/service_test.go
index 21bc49d..7622016 100644
--- a/internal/pkg/application/service_test.go
+++ b/internal/pkg/application/service_test.go
@@ -42,11 +42,13 @@
 	Device: test_device,
 }
 var voltDevice = &VoltDevice{
-	Name:            "test_name",
-	State:           controller.DeviceStateUP,
-	FlowAddEventMap: util.NewConcurrentMap(),
-	FlowDelEventMap: util.NewConcurrentMap(),
-	SerialNum:       "test_serial_number",
+	Name:                         "test_name",
+	State:                        controller.DeviceStateUP,
+	FlowAddEventMap:              util.NewConcurrentMap(),
+	FlowDelEventMap:              util.NewConcurrentMap(),
+	SerialNum:                    "test_serial_number",
+	ConfiguredVlanForDeviceFlows: util.NewConcurrentMap(),
+	NniPort:                      "16777216",
 }
 
 var voltMeter = &VoltMeter{
diff --git a/internal/pkg/application/vnets.go b/internal/pkg/application/vnets.go
index 1374faa..2a9d4b2 100644
--- a/internal/pkg/application/vnets.go
+++ b/internal/pkg/application/vnets.go
@@ -20,6 +20,7 @@
 	"encoding/json"
 	"errors"
 	"net"
+	"reflect"
 	"strconv"
 	"sync"
 	"time"
@@ -2260,7 +2261,7 @@
 	}
 	vpvs := vpvsIntf.([]*VoltPortVnet)
 	for i, lvpv := range vpvs {
-		if lvpv == vpv {
+		if reflect.DeepEqual(lvpv, vpv) {
 			logger.Debugw(ctx, "Deleting VPV from port", log.Fields{"Port": vpv.Port, "SVLAN": vpv.SVlan, "CVLAN": vpv.CVlan,
 				"UNIVLAN": vpv.UniVlan})
 
diff --git a/internal/pkg/application/vnets_test.go b/internal/pkg/application/vnets_test.go
index 8942dbb..5a26bea 100644
--- a/internal/pkg/application/vnets_test.go
+++ b/internal/pkg/application/vnets_test.go
@@ -17,7 +17,10 @@
 
 import (
 	"context"
+	"encoding/json"
+	"net"
 	"reflect"
+	"sync"
 	"testing"
 	cntlr "voltha-go-controller/internal/pkg/controller"
 	"voltha-go-controller/internal/pkg/of"
@@ -25,6 +28,7 @@
 	"voltha-go-controller/internal/test/mocks"
 
 	"github.com/golang/mock/gomock"
+	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -645,3 +649,248 @@
 		})
 	}
 }
+
+func TestVoltApplication_RestoreVnetsFromDb(t *testing.T) { // exercises RestoreVnetsFromDb with one vnet returned by the mocked DB
+	type args struct {
+		cntx context.Context
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "VoltApplication_RestoreVnetsFromDb",
+			args: args{
+				cntx: context.Background(),
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			vnetsToDelete := map[string]bool{}
+			vnetsToDelete["test_name"] = true // restored vnet is also marked for deletion so that path is covered
+			va := &VoltApplication{
+				VnetsBySvlan:  util.NewConcurrentMap(),
+				VnetsToDelete: vnetsToDelete,
+			}
+			dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+			db = dbintf // swap the package-level DB handle for the mock
+			vnets := map[string]*kvstore.KVPair{}
+			voltVnet.SVlan = of.VlanAny // NOTE(review): mutates the shared package-level fixture — confirm no cross-test interference
+			b, err := json.Marshal(voltVnet)
+			if err != nil {
+				t.Fatalf("failed to marshal voltVnet: %v", err) // fail the subtest instead of panicking the whole run
+			}
+			vnets["test_device_id"] = &kvstore.KVPair{
+				Key:   "test_device_id",
+				Value: b,
+			}
+			dbintf.EXPECT().PutVnet(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1)
+			dbintf.EXPECT().GetVnets(tt.args.cntx).Return(vnets, nil)
+			va.RestoreVnetsFromDb(tt.args.cntx)
+		})
+	}
+}
+
+func TestVoltApplication_DeleteDevFlowForDevice(t *testing.T) { // exercises DeleteDevFlowForDevice with one vnet registered under its vlan key
+	type args struct {
+		cntx   context.Context
+		device *VoltDevice
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "VoltApplication_DeleteDevFlowForDevice",
+			args: args{
+				cntx: context.Background(),
+				device: &VoltDevice{
+					Name:                         test_device,
+					ConfiguredVlanForDeviceFlows: util.NewConcurrentMap(),
+				},
+			},
+		},
+	}
+	var voltVnet_DeleteDevFlowForDevice = &VoltVnet{ // local fixture: does not touch the shared package-level voltVnet
+		Version: "test_version",
+		VnetConfig: VnetConfig{
+			Name:  "test_name",
+			SVlan: of.VlanAny,
+			CVlan: of.VlanAny,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			va := &VoltApplication{}
+			va.VnetsByName.Store("4096-4096-0", voltVnet_DeleteDevFlowForDevice)
+			// tt.args.device.ConfiguredVlanForDeviceFlows.SyncMap.Store("4096-4096-0", util.NewConcurrentMap())
+			va.DeleteDevFlowForDevice(tt.args.cntx, tt.args.device)
+		})
+	}
+}
+
+func TestVoltApplication_DelVnetFromPort(t *testing.T) { // exercises DelVnetFromPort: removes a VPV from the port map, persisting via the mocked DB
+	macAdd, _ := net.ParseMAC("ff:ff:ff:ff:ff:ff")
+	vpv_test := []*VoltPortVnet{ // separate instance, field-equal to tt.args.vpv (matched via reflect.DeepEqual in DelVnetFromPort)
+		{
+			Device:   test_device,
+			Port:     "test_port",
+			MacAddr:  macAdd,
+			VnetName: "test_vnet_name",
+		},
+	}
+	type args struct {
+		cntx context.Context
+		port string
+		vpv  *VoltPortVnet
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "VoltApplication_DelVnetFromPort",
+			args: args{
+				cntx: context.Background(),
+				port: "test_port",
+				vpv: &VoltPortVnet{
+					Device:   test_device,
+					Port:     "test_port",
+					MacAddr:  macAdd,
+					VnetName: "test_vnet_name",
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			va := &VoltApplication{}
+			va.VnetsByPort.Store("test_port", vpv_test)
+			dbintf := mocks.NewMockDBIntf(gomock.NewController(t))
+			db = dbintf // swap the package-level DB handle for the mock
+			dbintf.EXPECT().PutVpv(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+			dbintf.EXPECT().DelVpv(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) // exactly one delete is the assertion of this test
+			va.VnetsByName.Store("test_vnet_name", &VoltVnet{
+				Version: "test_version",
+			})
+			va.DelVnetFromPort(tt.args.cntx, tt.args.port, tt.args.vpv)
+		})
+	}
+}
+
+func TestVoltApplication_PushDevFlowForVlan(t *testing.T) { // exercises PushDevFlowForVlan against a discovered device matching the vnet's DevicesList
+	type args struct {
+		cntx context.Context
+		vnet *VoltVnet
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "VoltApplication_PushDevFlowForVlan",
+			args: args{
+				cntx: context.Background(),
+				vnet: &VoltVnet{
+					Version: "test_version",
+					VnetConfig: VnetConfig{
+						DevicesList: []string{"test_serialNum"},
+						SVlan:       of.VlanAny,
+					},
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			va := &VoltApplication{}
+			voltDevice.SerialNum = "test_serialNum" // NOTE(review): mutates the shared package-level fixture — confirm no cross-test interference
+			voltDevice.VlanPortStatus.Store(uint16(of.VlanAny), true)
+			voltDevice.Name = test_device
+			va.DevicesDisc.Store(test_device, voltDevice)
+			ga := GetApplication()
+			ga.DevicesDisc.Store(test_device, voltDevice) // registered on the singleton too — presumably resolved via GetApplication; verify
+			_ = cntlr.NewController(context.Background(), mocks.NewMockApp(gomock.NewController(t))) // called for its side effect of installing the controller — TODO confirm
+			va.PushDevFlowForVlan(tt.args.cntx, tt.args.vnet)
+		})
+	}
+}
+
+func TestVoltApplication_PushDevFlowForDevice(t *testing.T) { // covers three setups of PushDevFlowForDevice, selected by test name
+	type args struct {
+		cntx   context.Context
+		device *VoltDevice
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "device.ConfiguredVlanForDeviceFlows is ok",
+			args: args{
+				cntx: context.Background(),
+				device: &VoltDevice{
+					Name:                         test_device,
+					ConfiguredVlanForDeviceFlows: util.NewConcurrentMap(),
+				},
+			},
+		},
+		{
+			name: "device.VlanPortStatus is false",
+			args: args{
+				cntx: context.Background(),
+				device: &VoltDevice{
+					Name:                         test_device,
+					ConfiguredVlanForDeviceFlows: util.NewConcurrentMap(),
+					NniPort:                      "test_nni_port",
+				},
+			},
+		},
+		{
+			name: "device.VlanPortStatus is true",
+			args: args{
+				cntx: context.Background(),
+				device: &VoltDevice{
+					Name:                         test_device,
+					ConfiguredVlanForDeviceFlows: util.NewConcurrentMap(),
+					NniPort:                      "test_nni_port",
+					VlanPortStatus:               sync.Map{},
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			va := &VoltApplication{}
+			switch tt.name { // per-case setup keyed on tt.name
+			case "device.ConfiguredVlanForDeviceFlows is ok":
+				va.VnetsByName.Store("test_vnet_name", &VoltVnet{
+					Version: "test_version",
+				})
+				tt.args.device.ConfiguredVlanForDeviceFlows.Set("0-0-0", util.NewConcurrentMap()) // vlan pre-marked as configured — presumably skips the push; verify
+				va.PushDevFlowForDevice(tt.args.cntx, tt.args.device)
+			case "device.VlanPortStatus is false":
+				va.VnetsByName.Store("test_vnet_name", &VoltVnet{
+					Version: "test_version",
+				})
+				va.PortsDisc.Store("test_nni_port", &VoltPort{ // NNI port discovered, but no VlanPortStatus entry stored
+					Name: "test_name",
+				})
+				va.PushDevFlowForDevice(tt.args.cntx, tt.args.device)
+			case "device.VlanPortStatus is true":
+				va.VnetsByName.Store("test_vnet_name", &VoltVnet{
+					Version: "test_version",
+					VnetConfig: VnetConfig{
+						SVlan: of.VlanAny,
+					},
+				})
+				va.PortsDisc.Store("test_nni_port", &VoltPort{
+					Name: "test_name",
+				})
+				tt.args.device.VlanPortStatus.Store(uint16(of.VlanAny), true) // vlan reported up on the device
+				va.PushDevFlowForDevice(tt.args.cntx, tt.args.device)
+			}
+		})
+	}
+}