diff --git a/Makefile b/Makefile index 96f35f390..761c24af9 100644 --- a/Makefile +++ b/Makefile @@ -362,7 +362,7 @@ unitest_tests: -@rm -rf $(UNITEST_OUTPUT) -@mkdir -p $(UNITEST_OUTPUT) @echo "run unitest tests" - sudo $(ROOT_DIR)/tools/golang/ginkgo.sh \ + sudo $(ROOT_DIR)/tools/golang/ginkgo.sh -gcflags "-l" \ --cover --coverprofile=./coverage.out --covermode set \ --json-report unitestreport.json \ -randomize-suites -randomize-all --keep-going --timeout=1h -p --slow-spec-threshold=120s \ diff --git a/pkg/agent/metrics/metrics_test.go b/pkg/agent/metrics/metrics_test.go new file mode 100644 index 000000000..540f965cd --- /dev/null +++ b/pkg/agent/metrics/metrics_test.go @@ -0,0 +1,12 @@ +// Copyright 2022 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "testing" +) + +func TestRegisterMetricCollectors(t *testing.T) { + RegisterMetricCollectors() +} diff --git a/pkg/agent/route/route.go b/pkg/agent/route/route.go index 4c132bbe6..c8cfed4cb 100644 --- a/pkg/agent/route/route.go +++ b/pkg/agent/route/route.go @@ -88,18 +88,18 @@ func (r *RuleRoute) Ensure(linkName string, ipv4, ipv6 *net.IP, table int, mark log.V(1).Info("get link") - err = r.ensureRoute(link, ipv4, netlink.FAMILY_V4, table, log) + err = r.EnsureRoute(link, ipv4, netlink.FAMILY_V4, table, log) if err != nil { return err } - err = r.ensureRoute(link, ipv6, netlink.FAMILY_V6, table, log) + err = r.EnsureRoute(link, ipv6, netlink.FAMILY_V6, table, log) if err != nil { return err } return nil } -func (r *RuleRoute) ensureRoute(link netlink.Link, ip *net.IP, family int, table int, log logr.Logger) error { +func (r *RuleRoute) EnsureRoute(link netlink.Link, ip *net.IP, family int, table int, log logr.Logger) error { log = log.WithValues("family", family, "ip", ip) log.V(1).Info("ensure route") diff --git a/pkg/agent/route/route_test.go b/pkg/agent/route/route_test.go new file mode 100644 index 000000000..f4225ecdf --- /dev/null +++ b/pkg/agent/route/route_test.go @@ -0,0 +1,467 @@ +// Copyright 2022 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +package route + +import ( + "errors" + "net" + "testing" + + "github.com/agiledragon/gomonkey/v2" + "github.com/go-logr/logr" + "github.com/spidernet-io/egressgateway/pkg/markallocator" + "github.com/stretchr/testify/assert" + "github.com/vishvananda/netlink" +) + +var mockLogger = logr.Logger{} + +func TestPurgeStaleRules(t *testing.T) { + cases := map[string]struct { + prepare func() []gomonkey.Patches + expErr bool + }{ + "failed RangeSize": { + prepare: err_PurgeStaleRules_RangeSize, + expErr: true, + }, + "failed RuleListFiltered v4": { + prepare: err_PurgeStaleRules_RuleListFilteredV4, + expErr: true, + }, + "failed RuleListFiltered v6": { + prepare: err_PurgeStaleRules_RuleListFilteredV6, + expErr: true, + }, + "failed RuleDel v4": { + prepare: err_PurgeStaleRules_RuleDelV4, + expErr: true, + }, + "failed RuleDel v6": { + prepare: err_PurgeStaleRules_RuleDelV6, + expErr: true, + }, + "succeed": {}, + } + ruleRoute := NewRuleRoute(mockLogger) + + marks := map[int]struct{}{ + 1: {}, + 2: {}, + } + + baseMark := "1000" + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var err error + var patches = make([]gomonkey.Patches, 0) + if tc.prepare != nil { + patchess := tc.prepare() + patches = append(patches, patchess...) 
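+				// gomonkey patches rewrite the target function's entry code at
+				// run time, so inlining must stay off: that is what the
+				// -gcflags "-l" added to the ginkgo invocation in the Makefile
+				// hunk above is for. Likewise, route.go renames ensureRoute to
+				// the exported EnsureRoute so that ApplyMethodReturn, which
+				// resolves methods via reflection (exported names only), can
+				// stub it in TestEnsure below.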
+ defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + if tc.expErr { + err = ruleRoute.PurgeStaleRules(marks, baseMark) + assert.Error(t, err) + } else { + err = ruleRoute.PurgeStaleRules(marks, baseMark) + assert.NoError(t, err) + } + }) + } +} + +func TestEnsure(t *testing.T) { + cases := map[string]struct { + makePatch func(*RuleRoute) []gomonkey.Patches + prepare func() (string, *net.IP, *net.IP, int, int) + expErr bool + }{ + "zero mark": { + prepare: mock_Ensure__zeroMark, + }, + "failed EnsureRule v4": { + makePatch: err_Ensure_EnsureRuleV4, + prepare: mock_Ensure_params, + expErr: true, + }, + "failed EnsureRule v6": { + makePatch: err_Ensure_EnsureRuleV6, + prepare: mock_Ensure_params, + expErr: true, + }, + "failed LinkByName": { + prepare: mock_Ensure_params, + expErr: true, + }, + "failed EnsureRoute v4": { + makePatch: err_Ensure_EnsureRouteV4, + prepare: mock_Ensure_params, + expErr: true, + }, + "failed EnsureRoute v6": { + makePatch: err_Ensure_EnsureRouteV6, + prepare: mock_Ensure_params, + expErr: true, + }, + "succeeded Ensure": { + makePatch: succ_Ensure, + prepare: mock_Ensure_params, + }, + } + ruleRoute := NewRuleRoute(mockLogger) + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var err error + var patches = make([]gomonkey.Patches, 0) + if tc.makePatch != nil { + patchess := tc.makePatch(ruleRoute) + patches = append(patches, patchess...) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + name, ipv4, ipv6, table, mark := tc.prepare() + if tc.expErr { + err = ruleRoute.Ensure(name, ipv4, ipv6, table, mark) + assert.Error(t, err) + } else { + err = ruleRoute.Ensure(name, ipv4, ipv6, table, mark) + assert.NoError(t, err) + } + }) + } +} + +func TestEnsureRoute(t *testing.T) { + cases := map[string]struct { + makePatch func() []gomonkey.Patches + prepare func() (netlink.Link, *net.IP, int, int, logr.Logger) + expErr bool + }{ + "failed RouteListFiltered v4": { + prepare: mock_EnsureRoute_params, + makePatch: err_EnsureRoute_RouteListFiltered, + expErr: true, + }, + "failed RouteDel v4": { + prepare: mock_EnsureRoute_params, + makePatch: err_EnsureRoute_RouteDel, + expErr: true, + }, + "succeeded EnsureRoute": { + prepare: mock_EnsureRoute_params, + makePatch: succ_EnsureRoute, + }, + "nil ip": { + prepare: mock_EnsureRoute_empty_ip, + makePatch: err_EnsureRoute_empty_ip, + }, + + "failed RouteAdd": { + prepare: mock_EnsureRoute_params, + makePatch: err_EnsureRoute_RouteAdd, + expErr: true, + }, + } + ruleRoute := NewRuleRoute(mockLogger) + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var err error + var patches = make([]gomonkey.Patches, 0) + if tc.makePatch != nil { + patchess := tc.makePatch() + patches = append(patches, patchess...) 
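+				// Each table entry wires two hooks: prepare() builds the call
+				// arguments and makePatch() installs the gomonkey stubs that
+				// force exactly one netlink call to fail, e.g. (mirroring the
+				// cases above):
+				//
+				//   "failed RouteAdd": {
+				//       prepare:   mock_EnsureRoute_params,
+				//       makePatch: err_EnsureRoute_RouteAdd, // stubs netlink.RouteAdd
+				//       expErr:    true,
+				//   },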
+ defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + name, ip, family, table, log := tc.prepare() + if tc.expErr { + err = ruleRoute.EnsureRoute(name, ip, family, table, log) + assert.Error(t, err) + } else { + err = ruleRoute.EnsureRoute(name, ip, family, table, log) + assert.NoError(t, err) + } + }) + } +} + +func TestEnsureRule(t *testing.T) { + cases := map[string]struct { + makePatch func() []gomonkey.Patches + prepare func() (int, int, int, logr.Logger) + expErr bool + }{ + "failed RuleListFiltered v4": { + prepare: mock_EnsureRule_params, + makePatch: err_EnsureRule_RuleListFiltered, + expErr: true, + }, + "failed RuleDel": { + prepare: mock_EnsureRule_params, + makePatch: err_EnsureRule_RuleDel, + expErr: true, + }, + "succeeded found": { + prepare: mock_EnsureRule_params, + makePatch: succ_EnsureRule_found, + }, + "failed RuleAdd": { + prepare: mock_EnsureRule_params, + makePatch: err_EnsureRule_RuleAdd, + expErr: true, + }, + "succeeded RuleAdd": { + prepare: mock_EnsureRule_params, + makePatch: succ_EnsureRule_RuleAdd, + }, + + "succeeded multi-RuleDel": { + prepare: mock_EnsureRule_params, + makePatch: succ_EnsureRule_multi_RuleDel, + }, + } + ruleRoute := NewRuleRoute(mockLogger) + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var err error + var patches = make([]gomonkey.Patches, 0) + if tc.makePatch != nil { + patchess := tc.makePatch() + patches = append(patches, patchess...) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + family, table, mark, log := tc.prepare() + if tc.expErr { + err = ruleRoute.EnsureRule(family, table, mark, log) + assert.Error(t, err) + } else { + err = ruleRoute.EnsureRule(family, table, mark, log) + assert.NoError(t, err) + } + }) + } +} + +func err_PurgeStaleRules_RangeSize() []gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(markallocator.RangeSize, uint64(0), uint64(0), errors.New("some error")) + return []gomonkey.Patches{*patch} +} + +func err_PurgeStaleRules_RuleListFilteredV4() []gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(netlink.RuleListFiltered, nil, errors.New("some error")) + return []gomonkey.Patches{*patch} +} + +func err_PurgeStaleRules_RuleListFilteredV6() []gomonkey.Patches { + patch := gomonkey.ApplyFuncSeq(netlink.RuleListFiltered, []gomonkey.OutputCell{ + {Values: gomonkey.Params{nil, nil}, Times: 1}, + {Values: gomonkey.Params{nil, errors.New("some error")}, Times: 1}, + }) + return []gomonkey.Patches{*patch} +} + +func err_PurgeStaleRules_RuleDelV4() []gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(netlink.RuleListFiltered, []netlink.Rule{{Mark: 5000}}, nil) + patch2 := gomonkey.ApplyFuncReturn(netlink.RuleDel, errors.New("some error")) + return []gomonkey.Patches{*patch, *patch2} +} + +func err_PurgeStaleRules_RuleDelV6() []gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(netlink.RuleListFiltered, []netlink.Rule{{Mark: 5000}}, nil) + patch2 := gomonkey.ApplyFuncSeq(netlink.RuleDel, []gomonkey.OutputCell{ + {Values: gomonkey.Params{nil}, Times: 1}, + {Values: gomonkey.Params{errors.New("some error")}, Times: 1}, + }) + return []gomonkey.Patches{*patch, *patch2} +} + +func mock_Ensure__zeroMark() (string, *net.IP, *net.IP, int, int) { + return "testlink", nil, nil, 0, 0 +} + +func mock_Ensure_params() (string, *net.IP, *net.IP, int, int) { + ipv4 := net.ParseIP("192.168.0.1") + ipv6 := net.ParseIP("2001:db8::1") + return "testlink", &ipv4, &ipv6, 1000, 1234 +} + +func err_Ensure_EnsureRuleV4(r *RuleRoute) []gomonkey.Patches 
{ + patch := gomonkey.ApplyMethodReturn(r, "EnsureRule", errors.New("some err")) + return []gomonkey.Patches{*patch} +} + +func err_Ensure_EnsureRuleV6(r *RuleRoute) []gomonkey.Patches { + patch := gomonkey.ApplyMethodSeq(r, "EnsureRule", []gomonkey.OutputCell{ + {Values: gomonkey.Params{nil}, Times: 1}, + {Values: gomonkey.Params{errors.New("some err")}, Times: 1}, + }) + return []gomonkey.Patches{*patch} +} + +func err_Ensure_EnsureRouteV4(r *RuleRoute) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.LinkByName, &netlink.Dummy{}, nil) + patch := gomonkey.ApplyMethodReturn(r, "EnsureRoute", errors.New("some err")) + return []gomonkey.Patches{*patch1, *patch} +} + +func err_Ensure_EnsureRouteV6(r *RuleRoute) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.LinkByName, &netlink.Dummy{}, nil) + patch := gomonkey.ApplyMethodSeq(r, "EnsureRoute", []gomonkey.OutputCell{ + {Values: gomonkey.Params{nil}, Times: 1}, + {Values: gomonkey.Params{errors.New("some err")}, Times: 1}, + }) + return []gomonkey.Patches{*patch1, *patch} +} + +func succ_Ensure(r *RuleRoute) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.LinkByName, &netlink.Dummy{}, nil) + patch := gomonkey.ApplyMethodReturn(r, "EnsureRoute", nil) + return []gomonkey.Patches{*patch1, *patch} +} + +func mock_EnsureRoute_params() (netlink.Link, *net.IP, int, int, logr.Logger) { + link := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Index: 1, + Name: "testlink", + }, + } + ipv4 := net.ParseIP("192.168.0.1") + family := 4 + table := 1000 + log := logr.Logger{} + return link, &ipv4, family, table, log +} + +func mock_EnsureRoute_empty_ip() (netlink.Link, *net.IP, int, int, logr.Logger) { + link := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Index: 1, + Name: "testlink", + }, + } + family := 4 + table := 1000 + log := logr.Logger{} + return link, nil, family, table, log +} + +func err_EnsureRoute_RouteListFiltered() []gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(netlink.RouteListFiltered, nil, errors.New("some error")) + return []gomonkey.Patches{*patch} +} + +func err_EnsureRoute_RouteDel() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.RouteListFiltered, []netlink.Route{ + {Table: 1000}, + }, nil) + patch := gomonkey.ApplyFuncReturn(netlink.RouteDel, errors.New("some error")) + return []gomonkey.Patches{*patch, *patch1} +} + +func succ_EnsureRoute() []gomonkey.Patches { + gw := net.ParseIP("192.168.0.1") + patch1 := gomonkey.ApplyFuncReturn(netlink.RouteListFiltered, []netlink.Route{ + {Table: 1000, Gw: gw}, + }, nil) + patch := gomonkey.ApplyFuncReturn(netlink.RouteDel, nil) + return []gomonkey.Patches{*patch, *patch1} +} + +func err_EnsureRoute_empty_ip() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.RouteListFiltered, []netlink.Route{ + {Table: 1000}, + }, nil) + patch := gomonkey.ApplyFuncReturn(netlink.RouteDel, nil) + return []gomonkey.Patches{*patch, *patch1} +} + +func err_EnsureRoute_RouteAdd() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.RouteListFiltered, []netlink.Route{ + {Table: 1000}, + }, nil) + patch2 := gomonkey.ApplyFuncReturn(netlink.RouteDel, nil) + patch3 := gomonkey.ApplyFuncReturn(netlink.RouteAdd, errors.New("some err")) + + return []gomonkey.Patches{*patch2, *patch1, *patch3} +} + +func mock_EnsureRule_params() (int, int, int, logr.Logger) { + family := 4 + table := 1000 + mark := 1234 + log := logr.Logger{} + return family, table, mark, log +} + +func err_EnsureRule_RuleListFiltered() 
[]gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(netlink.RuleListFiltered, nil, errors.New("some error")) + return []gomonkey.Patches{*patch} +} + +func err_EnsureRule_RuleDel() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.RuleListFiltered, []netlink.Rule{ + {Table: 100}, + }, nil) + + patch := gomonkey.ApplyFuncReturn(netlink.RuleDel, errors.New("some error")) + return []gomonkey.Patches{*patch, *patch1} +} + +func succ_EnsureRule_found() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.RuleListFiltered, []netlink.Rule{ + {Table: 1000}, + }, nil) + + return []gomonkey.Patches{*patch1} +} + +func err_EnsureRule_RuleAdd() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.RuleListFiltered, []netlink.Rule{}, nil) + + patch := gomonkey.ApplyFuncReturn(netlink.RuleAdd, errors.New("some error")) + return []gomonkey.Patches{*patch, *patch1} +} + +func succ_EnsureRule_RuleAdd() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.RuleListFiltered, []netlink.Rule{}, nil) + + patch := gomonkey.ApplyFuncReturn(netlink.RuleAdd, nil) + return []gomonkey.Patches{*patch, *patch1} +} + +func succ_EnsureRule_multi_RuleDel() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.RuleListFiltered, []netlink.Rule{ + {Table: 1000}, + {Table: 1000}, + }, nil) + + patch := gomonkey.ApplyFuncSeq(netlink.RuleDel, []gomonkey.OutputCell{ + {Values: gomonkey.Params{nil}, Times: 1}, + {Values: gomonkey.Params{nil}, Times: 1}, + }) + return []gomonkey.Patches{*patch, *patch1} +} diff --git a/pkg/agent/vxlan/parent_test.go b/pkg/agent/vxlan/parent_test.go index 76ac399bb..cb0731a96 100644 --- a/pkg/agent/vxlan/parent_test.go +++ b/pkg/agent/vxlan/parent_test.go @@ -176,3 +176,102 @@ func case2() TestCase { }, } } + +func Test_GetParentByDefaultRoute(t *testing.T) { + mockLink := NetLink{ + RouteListFiltered: func(family int, filter *netlink.Route, filterMask uint64) ([]netlink.Route, error) { + return nil, errors.New("failed to get routes") + }, + LinkByIndex: func(index int) (netlink.Link, error) { + return nil, errors.New("failed to get link by index") + }, + AddrList: func(link netlink.Link, family int) ([]netlink.Addr, error) { + return nil, errors.New("failed to list link addrs") + }, + } + + // error for getting route list + _, err := GetParentByDefaultRoute(mockLink)(4) + assert.Error(t, err) + + // error for linking + mockLink.RouteListFiltered = func(family int, filter *netlink.Route, filterMask uint64) ([]netlink.Route, error) { + routes := []netlink.Route{ + { + Family: netlink.FAMILY_V4, + LinkIndex: 1, + }, + } + return routes, nil + } + _, err = GetParentByDefaultRoute(mockLink)(4) + assert.Error(t, err) + + // error for addrList + mockLink.LinkByIndex = func(index int) (netlink.Link, error) { + link := &netlink.Dummy{} + return link, nil + } + + _, err = GetParentByDefaultRoute(mockLink)(4) + assert.Error(t, err) + + // error to find parent interface + mockLink.AddrList = func(link netlink.Link, family int) ([]netlink.Addr, error) { + addrs := []netlink.Addr{ + { + IPNet: &net.IPNet{ + IP: net.ParseIP("192.168.0"), + Mask: net.CIDRMask(24, 32), + }, + }, + } + return addrs, nil + } + + _, err = GetParentByDefaultRoute(mockLink)(4) + assert.Error(t, err) +} + +func Test_GetParentByName(t *testing.T) { + mockLink := NetLink{ + LinkByName: func(name string) (netlink.Link, error) { + return nil, errors.New("failed to get link by name") + }, + AddrList: func(link netlink.Link, family int) ([]netlink.Addr, error) { + return 
nil, errors.New("failed to list link addrs") + }, + } + + // error to LinkByName + _, err := GetParentByName(mockLink, "eth0")(4) + assert.Error(t, err) + + // error to AddrList + mockLink.LinkByName = func(name string) (netlink.Link, error) { + link := &netlink.Dummy{} + return link, nil + } + + _, err = GetParentByName(mockLink, "eth0")(4) + assert.Error(t, err) + + // error to get parent interface + mockLink.AddrList = func(link netlink.Link, family int) ([]netlink.Addr, error) { + addrs := []netlink.Addr{ + { + IPNet: &net.IPNet{ + IP: net.ParseIP("192.168.0"), + Mask: net.CIDRMask(24, 32), + }, + }, + } + return addrs, nil + } + + _, err = GetParentByName(mockLink, "eth0")(4) + assert.Error(t, err) + + _, err = GetParentByName(mockLink, "eth0")(6) + assert.Error(t, err) +} diff --git a/pkg/agent/vxlan/vxlan_test.go b/pkg/agent/vxlan/vxlan_test.go index f0db2122a..19bdac504 100644 --- a/pkg/agent/vxlan/vxlan_test.go +++ b/pkg/agent/vxlan/vxlan_test.go @@ -4,9 +4,15 @@ package vxlan import ( + "errors" "net" + "os" + "syscall" "testing" + "github.com/agiledragon/gomonkey/v2" + "github.com/spidernet-io/egressgateway/pkg/ethtool" + "github.com/stretchr/testify/assert" "github.com/vishvananda/netlink" ) @@ -119,3 +125,884 @@ func TestVxlan(t *testing.T) { t.Fatal(err) } } + +func Test_WithCustomGetParent(t *testing.T) { + + getParent := func(version int) (*Parent, error) { + return &Parent{}, nil + } + retFunc := WithCustomGetParent(getParent) + + de := New() + retFunc(de) +} + +func Test_New(t *testing.T) { + opts := []func(*Device){ + func(d *Device) { + t.Log("for test") + }, + } + New(opts...) +} + +func Test_EnsureLink(t *testing.T) { + cases := map[string]struct { + prepare func() (name string, vni int, port int, mac net.HardwareAddr, mtu int, + ipv4, ipv6 *net.IPNet, + disableChecksumOffload bool) + patchFunc func(dev *Device) []gomonkey.Patches + customGetParent func(getParent func(version int) (*Parent, error)) func(device *Device) + expErr bool + }{ + "failed RangeSize": { + prepare: mock_EnsureLink_params, + // patchFunc: err_EnsureLink_getParent, + customGetParent: err_EnsureLink_getParent, + expErr: true, + }, + "ipv4 nil, ipv6 not nil ": { + prepare: mock_EnsureLink_nil_ipv4, + // patchFunc: err_EnsureLink_getParent, + customGetParent: err_EnsureLink_getParent, + expErr: true, + }, + "failed ensureLink": { + prepare: mock_EnsureLink_params, + patchFunc: err_EnsureLink_ensureLink, + customGetParent: succ_EnsureLink_getParent, + expErr: true, + }, + "failed ensureAddr v4": { + prepare: mock_EnsureLink_params, + patchFunc: err_EnsureLink_ensureAddr_v4, + customGetParent: succ_EnsureLink_getParent, + expErr: true, + }, + "failed ensureFilter": { + prepare: mock_EnsureLink_params, + patchFunc: err_EnsureLink_ensureFilter, + customGetParent: succ_EnsureLink_getParent, + expErr: true, + }, + "failed EthtoolTXOff": { + prepare: mock_EnsureLink_disableChecksumOffload_on, + patchFunc: err_EnsureLink_EthtoolTXOff, + customGetParent: succ_EnsureLink_getParent, + expErr: true, + }, + "failed LinkSetUp": { + prepare: mock_EnsureLink_disableChecksumOffload_on, + patchFunc: err_EnsureLink_LinkSetUp, + customGetParent: succ_EnsureLink_getParent, + expErr: true, + }, + } + + var dev *Device + var err error + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.customGetParent != nil { + dev = new(Device) + tc.customGetParent(func(version int) (*Parent, error) { + return nil, errors.New("some err") + })(dev) + } else { + dev = New() + } + + var patches = 
make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patchess := tc.patchFunc(dev) + patches = append(patches, patchess...) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + name, vni, port, mac, mtu, ipv4, ipv6, disableChecksumOffload := tc.prepare() + err = dev.EnsureLink(name, vni, port, mac, mtu, ipv4, ipv6, disableChecksumOffload) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_ensureLink(t *testing.T) { + cases := map[string]struct { + patchFunc func() []gomonkey.Patches + expErr bool + }{ + "failed LinkByName": { + patchFunc: err_ensureLink_LinkByName, + expErr: true, + }, + "nil conflictAttr": { + patchFunc: err_ensureLink_nil_conflictAttr, + }, + "failed LinkDel": { + patchFunc: err_ensureLink_LinkDel, + expErr: true, + }, + "failed second LinkAdd": { + patchFunc: err_ensureLink_second_LinkAdd, + expErr: true, + }, + "failed first LinkAdd": { + patchFunc: err_ensureLink_first_LinkAdd, + expErr: true, + }, + "failed LinkByIndex": { + patchFunc: err_ensureLink_LinkByIndex, + expErr: true, + }, + "failed not Vxlan type": { + patchFunc: err_ensureLink_not_Vxlan_type, + expErr: true, + }, + "succeeded ensureLink": { + patchFunc: succ_ensureLink, + }, + } + + var dev *Device + var vxlan *netlink.Vxlan + var err error + + dev = new(Device) + vxlan = &netlink.Vxlan{} + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var patches = make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patchess := tc.patchFunc() + patches = append(patches, patchess...) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + _, err = dev.ensureLink(vxlan) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_ensureFilter(t *testing.T) { + dev := new(Device) + patch := gomonkey.ApplyFuncReturn(writeProcSys, errors.New("some err")) + defer patch.Reset() + err := dev.ensureFilter(&net.IPNet{}, &net.IPNet{}) + assert.Error(t, err) +} + +func Test_ListNeigh(t *testing.T) { + cases := map[string]struct { + patchFunc func(*Device) []gomonkey.Patches + expErr bool + }{ + "device not ready": { + patchFunc: err_ListNeigh_notReady, + }, + "failed NeighList": { + patchFunc: err_ListNeigh_NeighList, + expErr: true, + }, + "succeeded ListNeigh": { + patchFunc: succ_ListNeigh, + }, + } + + var dev *Device + var err error + + dev = new(Device) + dev.link = &netlink.Vxlan{} + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var patches = make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patchess := tc.patchFunc(dev) + patches = append(patches, patchess...) 
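+				// Unexported methods such as (*Device).notReady are invisible
+				// to reflection, so the helpers below reach them with
+				// ApplyPrivateMethod, whose replacement func takes the receiver
+				// as its first parameter:
+				//
+				//   gomonkey.ApplyPrivateMethod(dev, "notReady", func(_ *Device) bool {
+				//       return false
+				//   })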
+				defer func() {
+					for _, p := range patches {
+						p.Reset()
+					}
+				}()
+			}
+
+			_, err = dev.ListNeigh()
+			if tc.expErr {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+func Test_Add(t *testing.T) {
+	cases := map[string]struct {
+		setParams func(*Peer)
+		patchFunc func(*Device) []gomonkey.Patches
+		expErr    bool
+	}{
+		"device not ready": {
+			patchFunc: err_Add_notReady,
+		},
+		"failed add v6": {
+			setParams: mock_Add_params_v6(),
+			patchFunc: err_Add_add,
+			expErr:    true,
+		},
+		"failed add v4": {
+			setParams: mock_Add_params_v4(),
+			patchFunc: err_Add_add,
+			expErr:    true,
+		},
+		"failed NeighSet": {
+			patchFunc: err_Add_NeighSet,
+			expErr:    true,
+		},
+		"succeeded Add": {
+			patchFunc: succ_Add,
+		},
+	}
+
+	var dev *Device
+	var err error
+	var peer Peer
+
+	dev = new(Device)
+	dev.link = &netlink.Vxlan{}
+	peer = Peer{}
+
+	for name, tc := range cases {
+		t.Run(name, func(t *testing.T) {
+			if tc.setParams != nil {
+				tc.setParams(&peer)
+			}
+
+			var patches = make([]gomonkey.Patches, 0)
+			if tc.patchFunc != nil {
+				patchess := tc.patchFunc(dev)
+				patches = append(patches, patchess...)
+				defer func() {
+					for _, p := range patches {
+						p.Reset()
+					}
+				}()
+			}
+
+			err = dev.Add(peer)
+			if tc.expErr {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+func Test_add(t *testing.T) {
+	cases := map[string]struct {
+		patchFunc func(*Device) []gomonkey.Patches
+		expErr    bool
+	}{
+		"failed NeighSet": {
+			patchFunc: err_add_NeighSet,
+			expErr:    true,
+		},
+		"succeeded add": {
+			patchFunc: succ_add,
+		},
+	}
+
+	var dev *Device
+	var err error
+	var mac net.HardwareAddr
+	var ip net.IP
+
+	dev = new(Device)
+	dev.link = &netlink.Vxlan{}
+	mac, _ = net.ParseMAC("00:00:5e:00:53:01")
+	ip = net.ParseIP("192.168.0.2")
+
+	for name, tc := range cases {
+		t.Run(name, func(t *testing.T) {
+			var patches = make([]gomonkey.Patches, 0)
+			if tc.patchFunc != nil {
+				patchess := tc.patchFunc(dev)
+				patches = append(patches, patchess...)
+				defer func() {
+					for _, p := range patches {
+						p.Reset()
+					}
+				}()
+			}
+
+			err = dev.add(mac, ip)
+			if tc.expErr {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+func Test_Del(t *testing.T) {
+	cases := map[string]struct {
+		setParams func(*Peer)
+		patchFunc func(*Device) []gomonkey.Patches
+		expErr    bool
+	}{
+		"device not ready": {
+			patchFunc: err_Del_notReady,
+		},
+		"failed NeighDel": {
+			patchFunc: err_Del_NeighDel,
+			expErr:    true,
+		},
+		"succeeded Del": {
+			patchFunc: succ_Del,
+		},
+	}
+
+	var dev *Device
+	var err error
+	var neigh netlink.Neigh
+
+	dev = new(Device)
+	dev.link = &netlink.Vxlan{}
+	neigh = netlink.Neigh{}
+
+	for name, tc := range cases {
+		t.Run(name, func(t *testing.T) {
+			var patches = make([]gomonkey.Patches, 0)
+			if tc.patchFunc != nil {
+				patchess := tc.patchFunc(dev)
+				patches = append(patches, patchess...)
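+				// As with Add and ListNeigh above, the "device not ready" case
+				// expects no error: when notReady reports true, Del returns
+				// early without touching netlink, so no NeighDel stub is
+				// needed for it.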
+ defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + err = dev.Del(neigh) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_ensureAddr(t *testing.T) { + cases := map[string]struct { + setParams func() (ipn *net.IPNet, link netlink.Link, family int) + patchFunc func() []gomonkey.Patches + expErr bool + }{ + "nil ipn": { + setParams: mock_ensureAddr_nil_ipn, + }, + "failed AddrList": { + setParams: mock_ensureAddr_params, + patchFunc: err_ensureAddr_AddrList, + expErr: true, + }, + "failed AddrDel": { + setParams: mock_ensureAddr_params, + patchFunc: err_ensureAddr_AddrDel, + expErr: true, + }, + "failed AddrAdd": { + setParams: mock_ensureAddr_params, + patchFunc: err_ensureAddr_AddrAdd, + expErr: true, + }, + "succeede ensureAddr": { + setParams: mock_ensureAddr_params, + patchFunc: succ_ensureAddr, + }, + } + + var dev *Device + var err error + + var ipn *net.IPNet + var link netlink.Link + var family int + dev = new(Device) + dev.link = &netlink.Vxlan{} + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.setParams != nil { + ipn, link, family = tc.setParams() + } else { + t.Fatal("need set params") + } + var patches = make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patchess := tc.patchFunc() + patches = append(patches, patchess...) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + err = dev.ensureAddr(ipn, link, family) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_notReady(t *testing.T) { + dev := new(Device) + dev.link = nil + ok := dev.notReady() + assert.True(t, ok) +} + +func Test_writeProcSys(t *testing.T) { + cases := map[string]struct { + patchFunc func() []gomonkey.Patches + expErr bool + }{ + "failed OpenFile": { + expErr: true, + }, + + "failed Write": { + patchFunc: err_writeProcSys_Write, + expErr: true, + }, + "failed short length": { + patchFunc: err_writeProcSys_shortLen, + expErr: true, + }, + + "failed Close": { + patchFunc: succ_writeProcSys, + }, + } + + var err error + var path, value string + + path = "foo/bar" + value = "12345" + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var patches = make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patchess := tc.patchFunc() + patches = append(patches, patchess...) 
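+				// "failed OpenFile" needs no patch: the path "foo/bar" does not
+				// exist, so the real os.OpenFile fails on its own. The other
+				// cases patch os.OpenFile to hand back an empty *os.File and
+				// stub (*os.File).Write; returning n=0 with a nil error models
+				// a short write, which writeProcSys presumably rejects because
+				// n < len(value).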
+ defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + err = writeProcSys(path, value) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func mock_EnsureLink_params() (name string, vni int, port int, mac net.HardwareAddr, mtu int, + ipv4, ipv6 *net.IPNet, + disableChecksumOffload bool) { + name = "vxlan0xx" + vni = 100 + port = 4789 + mac, _ = net.ParseMAC("00:11:22:33:44:55") + mtu = 1500 + ipv4 = &net.IPNet{ + IP: net.IPv4(192, 168, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 0), + } + ipv6 = &net.IPNet{ + IP: net.ParseIP("2001:db8::1"), + Mask: net.CIDRMask(64, 128), + } + disableChecksumOffload = false + return +} + +func mock_EnsureLink_nil_ipv4() (name string, vni int, port int, mac net.HardwareAddr, mtu int, + ipv4, ipv6 *net.IPNet, + disableChecksumOffload bool) { + name = "vxlan0xx" + vni = 100 + port = 4789 + mac, _ = net.ParseMAC("00:11:22:33:44:55") + mtu = 1500 + ipv4 = nil + ipv6 = &net.IPNet{ + IP: net.ParseIP("2001:db8::1"), + Mask: net.CIDRMask(64, 128), + } + disableChecksumOffload = false + return +} + +func mock_EnsureLink_disableChecksumOffload_on() (name string, vni int, port int, mac net.HardwareAddr, mtu int, + ipv4, ipv6 *net.IPNet, + disableChecksumOffload bool) { + name = "vxlan0xx" + vni = 100 + port = 4789 + mac, _ = net.ParseMAC("00:11:22:33:44:55") + mtu = 1500 + ipv4 = &net.IPNet{ + IP: net.IPv4(192, 168, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 0), + } + ipv6 = &net.IPNet{ + IP: net.ParseIP("2001:db8::1"), + Mask: net.CIDRMask(64, 128), + } + disableChecksumOffload = true + return +} + +func err_EnsureLink_getParent(getParent func(version int) (*Parent, error)) func(device *Device) { + return WithCustomGetParent(func(version int) (*Parent, error) { + return nil, errors.New("some err") + }) +} + +func succ_EnsureLink_getParent(getParent func(version int) (*Parent, error)) func(device *Device) { + return WithCustomGetParent(func(version int) (*Parent, error) { + return &Parent{}, nil + }) +} + +func err_EnsureLink_ensureLink(dev *Device) []gomonkey.Patches { + patch := gomonkey.NewPatches() + patch.ApplyPrivateMethod(dev, "ensureLink", func(_ *Device) (*netlink.Vxlan, error) { + return nil, errors.New("some err") + }) + + return []gomonkey.Patches{*patch} +} + +func err_EnsureLink_ensureAddr_v4(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "ensureLink", func(_ *Device) (*netlink.Vxlan, error) { + return &netlink.Vxlan{}, nil + }) + patch2 := gomonkey.ApplyPrivateMethod(dev, "ensureAddr", func(_ *Device) error { + return errors.New("some error") + }) + + return []gomonkey.Patches{*patch1, *patch2} +} + +func err_EnsureLink_ensureFilter(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "ensureLink", func(_ *Device) (*netlink.Vxlan, error) { + return &netlink.Vxlan{}, nil + }) + patch2 := gomonkey.ApplyPrivateMethod(dev, "ensureAddr", func(_ *Device) error { + return nil + }) + patch3 := gomonkey.ApplyPrivateMethod(dev, "ensureFilter", func(_ *Device) error { + return errors.New("some errr") + }) + + return []gomonkey.Patches{*patch1, *patch2, *patch3} +} + +func err_EnsureLink_EthtoolTXOff(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "ensureLink", func(_ *Device) (*netlink.Vxlan, error) { + return &netlink.Vxlan{}, nil + }) + patch2 := gomonkey.ApplyPrivateMethod(dev, "ensureAddr", func(_ *Device) error { + return nil + }) + patch3 := gomonkey.ApplyPrivateMethod(dev, "ensureFilter", func(_ *Device) 
error { + return nil + }) + patch4 := gomonkey.ApplyFuncReturn(ethtool.EthtoolTXOff, errors.New("some err")) + + return []gomonkey.Patches{*patch1, *patch2, *patch3, *patch4} +} + +func err_EnsureLink_LinkSetUp(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "ensureLink", func(_ *Device) (*netlink.Vxlan, error) { + return &netlink.Vxlan{}, nil + }) + patch2 := gomonkey.ApplyPrivateMethod(dev, "ensureAddr", func(_ *Device) error { + return nil + }) + patch3 := gomonkey.ApplyPrivateMethod(dev, "ensureFilter", func(_ *Device) error { + return nil + }) + patch4 := gomonkey.ApplyFuncReturn(ethtool.EthtoolTXOff, nil) + + patch5 := gomonkey.ApplyFuncReturn(netlink.LinkSetUp, errors.New("some err")) + + return []gomonkey.Patches{*patch1, *patch2, *patch3, *patch4, *patch5} +} + +func err_ensureLink_LinkByName() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.LinkAdd, syscall.EEXIST) + patch2 := gomonkey.ApplyFuncReturn(netlink.LinkByName, nil, errors.New("some err")) + return []gomonkey.Patches{*patch1, *patch2} +} + +func err_ensureLink_nil_conflictAttr() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.LinkAdd, syscall.EEXIST) + patch2 := gomonkey.ApplyFuncReturn(netlink.LinkByName, &netlink.Vxlan{}, nil) + return []gomonkey.Patches{*patch1, *patch2} +} + +func err_ensureLink_LinkDel() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.LinkAdd, syscall.EEXIST) + patch2 := gomonkey.ApplyFuncReturn(netlink.LinkByName, &netlink.Vxlan{VxlanId: 100}, nil) + patch3 := gomonkey.ApplyFuncReturn(netlink.LinkDel, errors.New("some err")) + return []gomonkey.Patches{*patch1, *patch2, *patch3} +} + +func err_ensureLink_second_LinkAdd() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncSeq(netlink.LinkAdd, []gomonkey.OutputCell{ + {Values: gomonkey.Params{syscall.EEXIST}, Times: 1}, + {Values: gomonkey.Params{errors.New("some err")}, Times: 1}, + }) + patch2 := gomonkey.ApplyFuncReturn(netlink.LinkByName, &netlink.Vxlan{VxlanId: 100}, nil) + patch3 := gomonkey.ApplyFuncReturn(netlink.LinkDel, nil) + return []gomonkey.Patches{*patch1, *patch2, *patch3} +} + +func err_ensureLink_first_LinkAdd() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.LinkAdd, errors.New("some err")) + return []gomonkey.Patches{*patch1} +} + +func err_ensureLink_LinkByIndex() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.LinkAdd, nil) + patch2 := gomonkey.ApplyFuncReturn(netlink.LinkByIndex, nil, errors.New("some err")) + return []gomonkey.Patches{*patch1, *patch2} +} + +func err_ensureLink_not_Vxlan_type() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.LinkAdd, nil) + patch2 := gomonkey.ApplyFuncReturn(netlink.LinkByIndex, &netlink.Vlan{}, nil) + return []gomonkey.Patches{*patch1, *patch2} +} + +func succ_ensureLink() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.LinkAdd, nil) + patch2 := gomonkey.ApplyFuncReturn(netlink.LinkByIndex, &netlink.Vxlan{}, nil) + return []gomonkey.Patches{*patch1, *patch2} +} + +func err_ListNeigh_notReady(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "notReady", func(_ *Device) bool { + return true + }) + return []gomonkey.Patches{*patch1} +} + +func err_ListNeigh_NeighList(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "notReady", func(_ *Device) bool { + return false + }) + patch2 := gomonkey.ApplyFuncReturn(netlink.NeighList, nil, errors.New("some err")) + return 
[]gomonkey.Patches{*patch1, *patch2} +} + +func succ_ListNeigh(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "notReady", func(_ *Device) bool { + return false + }) + patch2 := gomonkey.ApplyFuncReturn(netlink.NeighList, nil, nil) + return []gomonkey.Patches{*patch1, *patch2} +} + +func err_Add_notReady(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "notReady", func(_ *Device) bool { + return true + }) + return []gomonkey.Patches{*patch1} +} + +func err_Add_add(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "notReady", func(_ *Device) bool { + return false + }) + patch2 := gomonkey.ApplyPrivateMethod(dev, "add", func(_ *Device) error { + return errors.New("some err") + }) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_Add_params_v4() func(*Peer) { + return func(peer *Peer) { + ipv4 := net.ParseIP("192.168.0.2") + peer.IPv4 = &ipv4 + } +} + +func mock_Add_params_v6() func(*Peer) { + return func(peer *Peer) { + ipv6 := net.ParseIP("fddd:12::12") + peer.IPv6 = &ipv6 + } +} + +func err_Add_NeighSet(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "notReady", func(_ *Device) bool { + return false + }) + patch2 := gomonkey.ApplyPrivateMethod(dev, "add", func(_ *Device) error { + return nil + }) + patch3 := gomonkey.ApplyFuncReturn(netlink.NeighSet, errors.New("some err")) + + return []gomonkey.Patches{*patch1, *patch2, *patch3} +} + +func succ_Add(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "notReady", func(_ *Device) bool { + return false + }) + patch2 := gomonkey.ApplyPrivateMethod(dev, "add", func(_ *Device) error { + return nil + }) + patch3 := gomonkey.ApplyFuncReturn(netlink.NeighSet, nil) + + return []gomonkey.Patches{*patch1, *patch2, *patch3} +} + +func err_add_NeighSet(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.NeighSet, errors.New("some err")) + return []gomonkey.Patches{*patch1} +} + +func succ_add(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.NeighSet, nil) + return []gomonkey.Patches{*patch1} +} + +func err_Del_notReady(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "notReady", func(_ *Device) bool { + return true + }) + return []gomonkey.Patches{*patch1} +} + +func err_Del_NeighDel(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "notReady", func(_ *Device) bool { + return false + }) + patch2 := gomonkey.ApplyFuncReturn(netlink.NeighDel, errors.New("some err")) + + return []gomonkey.Patches{*patch1, *patch2} +} + +func succ_Del(dev *Device) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(dev, "notReady", func(_ *Device) bool { + return false + }) + patch2 := gomonkey.ApplyFuncReturn(netlink.NeighDel, nil) + + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_ensureAddr_params() (ipn *net.IPNet, link netlink.Link, family int) { + return &net.IPNet{IP: net.ParseIP("192.168.0.1")}, &netlink.Dummy{}, 4 +} + +func mock_ensureAddr_nil_ipn() (ipn *net.IPNet, link netlink.Link, family int) { + return nil, &netlink.Dummy{}, 4 +} + +func err_ensureAddr_AddrList() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.AddrList, nil, errors.New("some err")) + + return []gomonkey.Patches{*patch1} +} + +func err_ensureAddr_AddrDel() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.AddrList, []netlink.Addr{ + {IPNet: &net.IPNet{IP: 
net.ParseIP("192.168.0.2")}}, + {IPNet: &net.IPNet{IP: net.ParseIP("192.168.0.3")}}, + }, nil) + patch2 := gomonkey.ApplyFuncReturn(netlink.AddrDel, errors.New("some err")) + + return []gomonkey.Patches{*patch1, *patch2} +} + +func err_ensureAddr_AddrAdd() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.AddrList, []netlink.Addr{ + {IPNet: &net.IPNet{IP: net.ParseIP("192.168.0.2")}}, + {IPNet: &net.IPNet{IP: net.ParseIP("192.168.0.3")}}, + }, nil) + patch2 := gomonkey.ApplyFuncReturn(netlink.AddrDel, nil) + patch3 := gomonkey.ApplyFuncReturn(netlink.AddrAdd, errors.New("some err")) + + return []gomonkey.Patches{*patch1, *patch2, *patch3} +} + +func succ_ensureAddr() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(netlink.AddrList, []netlink.Addr{ + {IPNet: &net.IPNet{IP: net.ParseIP("192.168.0.2")}}, + {IPNet: &net.IPNet{IP: net.ParseIP("192.168.0.3")}}, + }, nil) + patch2 := gomonkey.ApplyFuncReturn(netlink.AddrDel, nil) + patch3 := gomonkey.ApplyFuncReturn(netlink.AddrAdd, nil) + + return []gomonkey.Patches{*patch1, *patch2, *patch3} +} + +func err_writeProcSys_Write() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(os.OpenFile, &os.File{}, nil) + patch2 := gomonkey.ApplyMethodReturn(&os.File{}, "Write", 0, errors.New("some err")) + return []gomonkey.Patches{*patch1, *patch2} +} + +func err_writeProcSys_shortLen() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(os.OpenFile, &os.File{}, nil) + patch2 := gomonkey.ApplyMethodReturn(&os.File{}, "Write", 0, nil) + return []gomonkey.Patches{*patch1, *patch2} +} + +func succ_writeProcSys() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(os.OpenFile, &os.File{}, nil) + patch2 := gomonkey.ApplyMethodReturn(&os.File{}, "Write", 10, nil) + return []gomonkey.Patches{*patch1, *patch2} +} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 6e0c7d3a7..051dfa2bb 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -4,6 +4,7 @@ package config import ( + "encoding/json" "fmt" "net" "os" @@ -11,8 +12,11 @@ import ( "github.com/agiledragon/gomonkey/v2" "github.com/mitchellh/mapstructure" + "github.com/spf13/viper" + "github.com/spidernet-io/egressgateway/pkg/iptables" "github.com/stretchr/testify/assert" "go.uber.org/zap" + "gopkg.in/yaml.v3" ctrl "sigs.k8s.io/controller-runtime" ) @@ -115,3 +119,161 @@ func TestLoadConfig(t *testing.T) { } } } + +func Test_PrintPrettyConfig(t *testing.T) { + cfg := &Config{} + patch := gomonkey.NewPatches() + patch.ApplyFuncReturn(json.Marshal, nil, mockError) + defer patch.Reset() + + assert.Panics(t, cfg.PrintPrettyConfig) +} + +func Test_LoadConfig(t *testing.T) { + cases := map[string]struct { + prepare func(t *testing.T) error + setParams func() bool + patchFunc func() []gomonkey.Patches + expErr bool + }{ + "failed GetVersion": { + setParams: mock_LoadConfig_true_isAgent, + patchFunc: err_LoadConfig_GetVersion, + expErr: true, + }, + + "failed BindEnv": { + setParams: mock_LoadConfig_true_isAgent, + patchFunc: err_LoadConfig_BindEnv, + expErr: true, + }, + + "failed viper Unmarshal": { + setParams: mock_LoadConfig_false_isAgent, + patchFunc: err_LoadConfig_viper_Unmarshal, + expErr: true, + }, + + "failed yaml Unmarshal": { + prepare: mock_LoadConfig_prepare, + setParams: mock_LoadConfig_false_isAgent, + patchFunc: err_LoadConfig_yaml_Unmarshal, + expErr: true, + }, + + "failed ParseCIDR v4": { + prepare: mock_LoadConfig_prepare, + setParams: mock_LoadConfig_false_isAgent, + patchFunc: err_LoadConfig_ParseCIDR_v4, + 
expErr: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.prepare != nil { + err := tc.prepare(t) + assert.NoError(t, err) + } + var patches = make([]gomonkey.Patches, 0) + + patchess := tc.patchFunc() + patches = append(patches, patchess...) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + + _, err := LoadConfig(tc.setParams()) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + + } +} + +func mock_LoadConfig_prepare(t *testing.T) error { + f, err := os.CreateTemp("", "example-") + if err != nil { + return err + } + + defer f.Close() + _, err = f.WriteString(tmpConfigmapData) + if err != nil { + return err + } + + fmt.Println("Created temp file:", f.Name()) + defer os.Remove(f.Name()) + + err = os.Setenv("CONFIGMAP_PATH", f.Name()) + if err != nil { + return err + } + + kubefile, err := os.CreateTemp("", "") + if err != nil { + return err + } + + defer kubefile.Close() + _, err = kubefile.WriteString("apiVersion: v1\nclusters:\n- cluster:\n certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1EY3hNakE0TXpZMU5Wb1hEVE16TURjd09UQTRNelkxTlZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTWh1CjM3bkdBek5TSGtOb1c0cW1RSzVScXp1VUlldkNjVWF4eWlZbkQwTE9yYkZVZ0lFVnRZZUEvN2psbjJidHVpVXYKVEsrRWliRTFNMUs0OC9IYk1ZVlh2WEtERDhzbTBmZ3lJVDhnc04rbFBwVVpZdFc2cGFXbWFUVnRuUWFQNU8vaAp6MEorcFIxeTkyQTFJL2ZmSVBEa2xZbXdwSldMa1BFU1IvRmRZMm9Bb2UwejFKTjZ4VTlNSGVvcVZnckc5d3dLCnRvTTNTTnFoWXZNa3VVRnFGN0Zrc0U1aUJWRmxEUXJLblZrM0p6Q25PR0tSU0FidVdPS2huZ0g0eUNVVk5ydWIKdWpDbU1iTUFSQ09uazBCUnVvcXZSdnNOeU9SdXpxdXdXSk5lQXNXeGZrT09zT1hZWmFBV0k1Zm4vLzFqbTArVwp4VVZkL0dSVjJ6TjRodW5QNEcwQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFa21rTmY0WGFXT0pZRHhlNWFFVUxMUlVJTVFNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS0tNMnJJMkozb01icGkySjdTdwpxU3hTN1FDcGljQUQyUkFKelBjMStvUmdOZndxbjhZNHVyY2dQazFkNWh3R2k5WTgxU0Nzb2YybitURldyeWNHCkFZOEhBc045M0RyUSt0ZUxWK1QzZ0xpd3BxNEp6QzFLTE1IZ3lDcU1uQXhRYjVkUUN6cFVLNjhaTG1NaVNvVnUKZnd1VTd5WjJyZmtJUUU2MVdsRW03NHQ2VjhkOFpQaVNFTXdTUDlzcE43Q0FHTHNJcElKREg5bEZtYXhjNnNDdwp4UjNUOXhqakE0SjFqSmY5RFdpQWZNWkRFMXhEREd5blZKZGdzeHRiMlFUMHVuRjRTYXZsaDB6alg2NmhrRkErCnhJd0hkWHMrbStTbi9ReWd4YkJKeXB5K0FlNkhZQU0rWE1BUnJtdXlzeHc3SFF4d2ZXM3FsSHVibThDK3JiUEwKcEJRPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==\n server: https://10.6.1.21:6443\n name: kubernetes\ncontexts:\n- context:\n cluster: kubernetes\n user: kubernetes-admin\n name: kubernetes-admin@kubernetes\ncurrent-context: kubernetes-admin@kubernetes\nkind: Config\npreferences: {}\nusers:\n- name: kubernetes-admin\n user:\n client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJRkxqUkdDa0FndlV3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TXpBM01USXdPRE0yTlRWYUZ3MHlOREEzTVRFd09ETTJOVGRhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXdDWW1TQVZDNkxncnBXcFYKdUlWank4U0IrQWN0N0VKVmMyMWZXQzVUUlZhMUJ1eXk2aFNiQlhWWldTN0VZM1JySHZXWWlabkhOdmswRXdjKwptMEIzTzM1dFR5ZTYrcisxNGpkZVV6WEdhRVN6cFNma1U1VnN4TGhrWFV1dEVnNTFHZkJCSVFDNVl0cENSY3YwCld5V2daQ0Y4TEdNMkhFL1FjczQzeG9pSE04Yit5cWNaK0hKUlhSTU9kQlZsRlcwZG1KTmN0MlRQOFhGWW43d1cKbDhUWndKUVZ3T2JQVHFhVGUwTFNHY085cm9RMnN3TExtZjNMakltUXpsaVlLTFA3L3JvLzVpMnNQV2FxQnRuTwphcXBveXpNUllRTUtYREd0RmNOS1J1SkIzR0p4d0hJcnBidFNIOVdzamJROXh2c2p1emoyZDhmQkp2ODJNSGF2CmNNTUt4d0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JSSkpwRFgrRjJsamlXQThYdVdoRkN5MFZDRApFREFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBVCtENjNvaVpYSUpMQ1UvWjVBS2laOWRUclgxbk9GalVMSEU3Cm5ZUHZHbXZPZmRqUGpjM2RNeXEycCtmTTlGLzVWOVNBbW1EK2Z3QWpOYk5OVUt6aHlFbVJBeDVsUGxYdk55L3kKdnc3REU0WUZYVE5zT1JLZjNIZ2JKam85dG1MMTFpZlBTZEs1V2dtVnJiMjZQVmMvYzBwWVRMb05oemg0MUNSaQpiMUkwTWpCdU9zQkFmdWRTdU9SQ0EwcFprek5LZkpJSExvTE03OVlBOFhMRWNoU2M2cXg1UWo0RVdvSzNHV0Y0CmxpU0xlRHE5ZXBJOTI1Y1BidU9MU1hGQ2NEN3lyblNLK2Vka2ZlUlhUQjhqSHBGcTlnWnR6VGpJSEhOT3NtdkYKVytaUWJEdXRIWGtrOGpsRXA2ckhsZ1BYNkZjWHB4ZDZOSGtXcXREYnAwTCtVSnFibUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==\n client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBd0NZbVNBVkM2TGdycFdwVnVJVmp5OFNCK0FjdDdFSlZjMjFmV0M1VFJWYTFCdXl5CjZoU2JCWFZaV1M3RVkzUnJIdldZaVpuSE52azBFd2MrbTBCM08zNXRUeWU2K3IrMTRqZGVVelhHYUVTenBTZmsKVTVWc3hMaGtYVXV0RWc1MUdmQkJJUUM1WXRwQ1JjdjBXeVdnWkNGOExHTTJIRS9RY3M0M3hvaUhNOGIreXFjWgorSEpSWFJNT2RCVmxGVzBkbUpOY3QyVFA4WEZZbjd3V2w4VFp3SlFWd09iUFRxYVRlMExTR2NPOXJvUTJzd0xMCm1mM0xqSW1RemxpWUtMUDcvcm8vNWkyc1BXYXFCdG5PYXFwb3l6TVJZUU1LWERHdEZjTktSdUpCM0dKeHdISXIKcGJ0U0g5V3NqYlE5eHZzanV6ajJkOGZCSnY4Mk1IYXZjTU1LeHdJREFRQUJBb0lCQVFDdXh3U3pKZllDY09OaQpoeUtKd090UmdlRW1sb0V6RWZOZ0Z2Qk82WFJjOGMyZ0N0V0REbE1qMStYNXRReDEyb05GbWVleDRlclBHa1kvCnVLczkxSm1meUJQdG1Cbi8wem5DRnRMNXBVUmZ1MzRjai9pai9wcjlKU3hGb0h4QW5GM3Z4aFczeTB6Vm1lb0cKU3NwdHpmL2lsSUs2YlZQZTFNcXFZdUZnK1BiaUpHQksvalRUVWlpR2luSnRUaVJlNDBRelFLb2FueTI4N0EyaQp5R0Z3OExzTVEvS1NEbzRUNzBrcGFaZXltZnl4L21NR0VRSGJGWlhBK0tBNEJNTC9xS2svV0dBUFQ2UkhhY1BKCnFReHRhRVM5S0EydUdkWU1vcExITHAybmtDMEc3UTAvcTY1dlVNRXduSklkUmk3emhiM0FHWEdGdTdPMUN5M2wKTktqQkxYR2hBb0dCQU40eG5BdkNXa2ZJRjdNOERrU0xQRW8wYnY2RStHc1Rpd1lYZkJkRm1lUGcvQVJNTFBmYQo0TjUxWXFDQkYvK3FkZlhRaGFONmRVcDA0ZTJQNlpZKzd5dDhMeHZzNFZXd2EwN0RPTDVkbjhDUjkwM0M3RFZnCk5IM1g3WXNJQUpRTHlOdjdla3N6UXFveXZKS01aVTBEOS9LR1JxMmxIRVplaEF3MkpjSGJpRXh0QW9HQkFOMWkKVGR4TnFvZXA1RCtESUdOczhFYVRDSnBXM255eElIVkZTRTkzZC9OL3BMZE1iQTlvR2syR0pibDR0b2hyM1dEZQpKSTNpUHRORzVvMytBMG8vTWZtbWp1d1VLbHk5SDFBZVIvaE0vY3lIL1hMQXE0b0NrTDV4NVF5QWRWWWI3c0JtCngxY3ZIdTgxUEs2aTRZalAxMTNHY2dkR3MwV3FsRlY4eFNsV0lFdURBb0dCQUlVMUJMSmdFRFBjbDZqU3BsTWQKamtXR2JjeVU5MEZxYy94dzgrb1h4Z3pDQXhTb2ZvVVJhYUswaVM1a2RuakdQdlhoejF5VXUrQ3BkaEV3Si8vMQpOdm5BOTVVc1RHTk00dWhUVForRERaVXJiVEhuWENrYnhoeHo2V3RpbnNZaTBvWmZtNCtkNmFlVHgwMnNjY2JjClREZlBuR3ZhQXJ1RlNuRHZ2VzhkSi9kNUFvR0JBTWJDODlUUGhrTzNMTTQ1RkdNdjg2bnBhTmZwRm1ndFAwOEsKblJsNzBaNDFBOVh1THpiRjZKZWgwVXpzTERYZllpc09SeE44Qlp2N0ZCUjM4c3crWU1nYjJrWHE5UDIrYnRhbgoyVVg5R2dFQU4zVkh0cnQ2QWlwNlo0TUo4azhWVlE0NU9NLzE1bmd0L0FWdkI3NmxuRjc5UkhOejdwQ2x6ZmZTCnhkR1BHZit4QW9HQUoyYmxNeWhYd1ltMitiOVJzTWxJeGNHOFVKMm9
DYVdDMVM0ZGQ1bzI5blJTS1Y4UmlJTG8KZUZaSlpjcDRtMlpNZkxkUVg1clNwRStaVWlFV2xuVWNSZktLSzNQbW9wU3VEK3BVTC9TaWZSbzlCMjNKNGZ6dwovaWVYVkpoajJEemJZSDZHRUtGaUttS1QzbW14WlNBY3B4OGJUYVhlT0IrK3hhdHlMaTJuTWZrPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=") + if err != nil { + return err + } + + defer os.Remove(kubefile.Name()) + + err = os.Setenv("KUBECONFIG", kubefile.Name()) + if err != nil { + return err + } + + config, err := LoadConfig(false) + if nil != err { + return err + } + config.PrintPrettyConfig() + + return nil + +} + +func mock_LoadConfig_true_isAgent() bool { + return true +} +func mock_LoadConfig_false_isAgent() bool { + return false +} + +func err_LoadConfig_GetVersion() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(iptables.GetVersion, iptables.Version{}, mockError) + return []gomonkey.Patches{*patch1} +} + +func err_LoadConfig_BindEnv() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(iptables.GetVersion, iptables.Version{Major: 1, Minor: 6, Patch: 2}, nil) + patch2 := gomonkey.ApplyFuncReturn(viper.BindEnv, mockError) + return []gomonkey.Patches{*patch1, *patch2} +} + +func err_LoadConfig_viper_Unmarshal() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(viper.Unmarshal, mockError) + return []gomonkey.Patches{*patch1} +} + +func err_LoadConfig_yaml_Unmarshal() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(yaml.Unmarshal, mockError) + return []gomonkey.Patches{*patch1} +} + +func err_LoadConfig_ParseCIDR_v4() []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(net.ParseCIDR, net.IP{}, nil, mockError) + + return []gomonkey.Patches{*patch1} +} diff --git a/pkg/controller/egress_cluster_info/egress_cluster_info_test.go b/pkg/controller/egress_cluster_info/egress_cluster_info_test.go new file mode 100644 index 000000000..d2340dc9a --- /dev/null +++ b/pkg/controller/egress_cluster_info/egress_cluster_info_test.go @@ -0,0 +1,1190 @@ +// Copyright 2022 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +package egressclusterinfo + +import ( + "context" + "errors" + "reflect" + "testing" + "time" + + "github.com/agiledragon/gomonkey/v2" + "github.com/go-logr/logr" + egressv1 "github.com/spidernet-io/egressgateway/pkg/k8s/apis/v1beta1" + "github.com/spidernet-io/egressgateway/pkg/schema" + "github.com/spidernet-io/egressgateway/pkg/utils/ip" + "github.com/stretchr/testify/assert" + calicov1 "github.com/tigera/operator/pkg/apis/crd.projectcalico.org/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var ErrForMock = errors.New("mock err") + +func Test_NewEgressClusterInfoController(t *testing.T) { + kubeConfig := &rest.Config{} + mgr, _ := ctrl.NewManager(kubeConfig, manager.Options{}) + log := logr.Logger{} + + patch := gomonkey.NewPatches() + patch.ApplyFuncReturn(controller.New, nil, ErrForMock) + defer patch.Reset() + + err := NewEgressClusterInfoController(mgr, log) + assert.Error(t, err) +} + +func Test_eciReconciler_Reconcile(t *testing.T) { + cases := map[string]struct { + getReqFunc func() reconcile.Request + setReconciler func(*eciReconciler) + patchFunc func(*eciReconciler) 
[]gomonkey.Patches + expErr bool + expRequeue bool + }{ + "reconcile calico, AutoDetect.PodCidrMode not calico": { + getReqFunc: mock_request_calico, + setReconciler: mock_eciReconciler_info_AutoDetect_PodCidrMode_not_calico, + patchFunc: mock_eciReconciler_getEgressClusterInfo_not_err, + }, + "reconcile no matched kind": { + getReqFunc: mock_request_no_match, + patchFunc: mock_eciReconciler_getEgressClusterInfo_not_err, + }, + "failed status Update IsConflict": { + getReqFunc: mock_request_calico, + setReconciler: mock_eciReconciler_info_AutoDetect_PodCidrMode_calico, + patchFunc: mock_Reconciler_Reconcile_failed_Update, + expRequeue: true, + }, + } + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + + // objs = append(objs, egci) + // builder.WithObjects(objs...) + // builder.WithStatusSubresource(objs...) + + r := &eciReconciler{ + // mgr: mgr, + eci: new(egressv1.EgressClusterInfo), + log: logr.Logger{}, + k8sPodCidr: make(map[string]egressv1.IPListPair), + v4ClusterCidr: make([]string, 0), + v6ClusterCidr: make([]string, 0), + client: builder.Build(), + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.setReconciler != nil { + tc.setReconciler(r) + } + + if tc.patchFunc != nil { + patches := tc.patchFunc(r) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + req := tc.getReqFunc() + ctx := context.TODO() + + res, err := r.Reconcile(ctx, req) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + if tc.expRequeue { + assert.True(t, res.Requeue) + } + }) + } + +} + +func Test_eciReconciler_reconcileEgressClusterInfo(t *testing.T) { + cases := map[string]struct { + setReconciler func(*eciReconciler) + patchFunc func(*eciReconciler) []gomonkey.Patches + expErr bool + }{ + "not watch node, failed watch node": { + setReconciler: mock_eciReconciler_info_isWatchingNode_false, + patchFunc: mock_Reconciler_reconcileEgressClusterInfo_watchSource_err, + expErr: true, + }, + + "not watnch node, failed listNodeIPs": { + setReconciler: mock_eciReconciler_info_isWatchingNode_false, + patchFunc: mock_Reconciler_reconcileEgressClusterInfo_listNodeIPs_err, + expErr: true, + }, + + "not watch node, succeeded listNodeIPs": { + setReconciler: mock_eciReconciler_info_isWatchingNode_false, + patchFunc: mock_Reconciler_reconcileEgressClusterInfo_listNodeIPs_succ, + }, + + "need stopCheckCalico": { + setReconciler: mock_eciReconciler_info_need_stopCheckCalico, + }, + + "failed checkSomeCniExists": { + setReconciler: mock_eciReconciler_info_AutoDetect_PodCidrMode_auto, + patchFunc: mock_Reconciler_reconcileEgressClusterInfo_checkSomeCniExists_err, + expErr: true, + }, + + "autoDetect calico, need watch calico, startCheckCalico": { + setReconciler: mock_eciReconciler_info_AutoDetect_calico_isWatchingCalico_false, + patchFunc: mock_Reconciler_reconcileEgressClusterInfo_checkSomeCniExists_err, + }, + + "autoDetect calico, watching calico, failed listCalicoIPPools": { + setReconciler: mock_eciReconciler_info_AutoDetect_calico_isWatchingCalico_true, + patchFunc: mock_Reconciler_reconcileEgressClusterInfo_listCalicoIPPools_err, + expErr: true, + }, + + // "autoDetect ClusterIP, failed getServiceClusterIPRange": { + // setReconciler: mock_eciReconciler_info_AutoDetect_ClusterIP, + // patchFunc: mock_Reconciler_reconcileEgressClusterInfo_getServiceClusterIPRange_err, + // expErr: true, + // }, + } + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + cli := builder.Build() + + // 
objs = append(objs, egci) + // builder.WithObjects(objs...) + // builder.WithStatusSubresource(objs...) + + // mgrOpts := manager.Options{ + // Scheme: schema.GetScheme(), + // NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + // return cli, nil + // }, + // } + + mgr, _ := ctrl.NewManager(&rest.Config{}, manager.Options{}) + + r := &eciReconciler{ + mgr: mgr, + eci: new(egressv1.EgressClusterInfo), + log: logr.Logger{}, + k8sPodCidr: make(map[string]egressv1.IPListPair), + v4ClusterCidr: make([]string, 0), + v6ClusterCidr: make([]string, 0), + client: cli, + } + c, _ := controller.New("egressClusterInfo", mgr, + controller.Options{Reconciler: r}) + r.c = c + + req := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: kindEGCI + "/", Name: egciName}} + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.setReconciler != nil { + tc.setReconciler(r) + } + + if tc.patchFunc != nil { + patches := tc.patchFunc(r) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + ctx := context.TODO() + + err := r.reconcileEgressClusterInfo(ctx, req, r.log) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } + +} + +func Test_eciReconciler_reconcileCalicoIPPool(t *testing.T) { + cases := map[string]struct { + setReconciler func(*eciReconciler) + patchFunc func(*eciReconciler) []gomonkey.Patches + expErr bool + }{ + "failed get calicoIPPool": { + patchFunc: mock_Reconciler_reconcileCalicoIPPool_Get_err, + expErr: true, + }, + + "failed getCalicoIPPools": { + patchFunc: mock_Reconciler_reconcileCalicoIPPool_getCalicoIPPools_err, + expErr: true, + }, + } + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + cli := builder.Build() + + // objs = append(objs, egci) + // builder.WithObjects(objs...) + // builder.WithStatusSubresource(objs...) 
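+	// The commented-out mgrOpts block below (kept here and in the sibling
+	// tests for reference) sketches an alternative wiring: injecting the fake
+	// client through manager.Options.NewClient instead of assigning it to the
+	// reconciler directly, as these tests do via the client field.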
+ + // mgrOpts := manager.Options{ + // Scheme: schema.GetScheme(), + // NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + // return cli, nil + // }, + // } + + mgr, _ := ctrl.NewManager(&rest.Config{}, manager.Options{}) + + r := &eciReconciler{ + mgr: mgr, + eci: new(egressv1.EgressClusterInfo), + log: logr.Logger{}, + k8sPodCidr: make(map[string]egressv1.IPListPair), + v4ClusterCidr: make([]string, 0), + v6ClusterCidr: make([]string, 0), + client: cli, + } + c, _ := controller.New("egressClusterInfo", mgr, + controller.Options{Reconciler: r}) + r.c = c + + req := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: kindCalicoIPPool + "/", Name: "xxx"}} + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.setReconciler != nil { + tc.setReconciler(r) + } + + if tc.patchFunc != nil { + patches := tc.patchFunc(r) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + ctx := context.TODO() + + err := r.reconcileCalicoIPPool(ctx, req, r.log) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_eciReconciler_reconcileNode(t *testing.T) { + cases := map[string]struct { + setReconciler func(*eciReconciler) + patchFunc func(*eciReconciler) []gomonkey.Patches + expErr bool + }{ + "failed get node": { + patchFunc: mock_Reconciler_reconcileNode_Get_err, + expErr: true, + }, + + "failed getNodeIPs": { + patchFunc: mock_Reconciler_reconcileNode_getNodeIPs_err, + expErr: true, + }, + } + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + cli := builder.Build() + + // objs = append(objs, egci) + // builder.WithObjects(objs...) + // builder.WithStatusSubresource(objs...) + + // mgrOpts := manager.Options{ + // Scheme: schema.GetScheme(), + // NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + // return cli, nil + // }, + // } + + mgr, _ := ctrl.NewManager(&rest.Config{}, manager.Options{}) + + r := &eciReconciler{ + mgr: mgr, + eci: new(egressv1.EgressClusterInfo), + log: logr.Logger{}, + k8sPodCidr: make(map[string]egressv1.IPListPair), + v4ClusterCidr: make([]string, 0), + v6ClusterCidr: make([]string, 0), + client: cli, + } + c, _ := controller.New("egressClusterInfo", mgr, + controller.Options{Reconciler: r}) + r.c = c + + req := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: kindCalicoIPPool + "/", Name: "xxx"}} + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.setReconciler != nil { + tc.setReconciler(r) + } + + if tc.patchFunc != nil { + patches := tc.patchFunc(r) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + ctx := context.TODO() + + err := r.reconcileNode(ctx, req, r.log) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_eciReconciler_listCalicoIPPools(t *testing.T) { + cases := map[string]struct { + setReconciler func(*eciReconciler) + patchFunc func(*eciReconciler) []gomonkey.Patches + expErr bool + }{ + "failed List": { + patchFunc: mock_Reconciler_listCalicoIPPools_List_err, + expErr: true, + }, + + "failed IsIPv4Cidr": { + patchFunc: mock_Reconciler_listCalicoIPPools_IsIPv4Cidr_err, + expErr: true, + }, + "failed IsIPv6Cidr": { + patchFunc: mock_Reconciler_listCalicoIPPools_IsIPv6Cidr_err, + expErr: true, + }, + } + + calicoIPPoolV4 := &calicov1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ippool-v4", + }, + Spec: calicov1.IPPoolSpec{ + // 
CIDR: "xxx", + CIDR: "10.10.0.0/18", + }, + } + calicoIPPoolV6 := &calicov1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ippool-v6", + }, + Spec: calicov1.IPPoolSpec{ + CIDR: "fdee:120::/120", + }, + } + + var objs []client.Object + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + objs = append(objs, calicoIPPoolV4, calicoIPPoolV6) + builder.WithObjects(objs...) + builder.WithStatusSubresource(objs...) + + cli := builder.Build() + + mgrOpts := manager.Options{ + Scheme: schema.GetScheme(), + NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + return cli, nil + }, + } + + mgr, _ := ctrl.NewManager(&rest.Config{}, mgrOpts) + + r := &eciReconciler{ + mgr: mgr, + eci: new(egressv1.EgressClusterInfo), + log: logr.Logger{}, + k8sPodCidr: make(map[string]egressv1.IPListPair), + v4ClusterCidr: make([]string, 0), + v6ClusterCidr: make([]string, 0), + client: cli, + } + c, _ := controller.New("egressClusterInfo", mgr, + controller.Options{Reconciler: r}) + r.c = c + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.setReconciler != nil { + tc.setReconciler(r) + } + + if tc.patchFunc != nil { + patches := tc.patchFunc(r) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + ctx := context.TODO() + + _, err := r.listCalicoIPPools(ctx) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_eciReconciler_getCalicoIPPools(t *testing.T) { + cases := map[string]struct { + setReconciler func(*eciReconciler) + objs []client.Object + patchFunc func(*eciReconciler) []gomonkey.Patches + expErr bool + }{ + "failed Get": { + objs: mock_IPPoolList(), + patchFunc: mock_Reconciler_reconcileNode_getCalicoIPPools_err, + expErr: true, + }, + + "failed IsIPv4Cidr": { + objs: mock_calicoIPPoolV4(), + patchFunc: mock_Reconciler_listCalicoIPPools_IsIPv4Cidr_err, + expErr: true, + }, + "failed IsIPv6Cidr": { + objs: mock_calicoIPPoolV6(), + patchFunc: mock_Reconciler_listCalicoIPPools_IsIPv6Cidr_err, + expErr: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + builder.WithObjects(tc.objs...) + builder.WithStatusSubresource(tc.objs...) 
+ + cli := builder.Build() + + mgrOpts := manager.Options{ + Scheme: schema.GetScheme(), + NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + return cli, nil + }, + } + + mgr, _ := ctrl.NewManager(&rest.Config{}, mgrOpts) + + r := &eciReconciler{ + mgr: mgr, + eci: new(egressv1.EgressClusterInfo), + log: logr.Logger{}, + k8sPodCidr: make(map[string]egressv1.IPListPair), + v4ClusterCidr: make([]string, 0), + v6ClusterCidr: make([]string, 0), + client: cli, + } + c, _ := controller.New("egressClusterInfo", mgr, + controller.Options{Reconciler: r}) + r.c = c + + if tc.setReconciler != nil { + tc.setReconciler(r) + } + + if tc.patchFunc != nil { + patches := tc.patchFunc(r) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + ctx := context.TODO() + + _, err := r.getCalicoIPPools(ctx, tc.objs[0].GetName()) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_eciReconciler_listNodeIPs(t *testing.T) { + cases := map[string]struct { + setReconciler func(*eciReconciler) + patchFunc func(*eciReconciler) []gomonkey.Patches + expErr bool + }{ + "failed List": { + patchFunc: mock_Reconciler_listNodeIPs_List_err, + expErr: true, + }, + } + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + + cli := builder.Build() + + mgrOpts := manager.Options{ + Scheme: schema.GetScheme(), + NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + return cli, nil + }, + } + + mgr, _ := ctrl.NewManager(&rest.Config{}, mgrOpts) + + r := &eciReconciler{ + mgr: mgr, + eci: new(egressv1.EgressClusterInfo), + log: logr.Logger{}, + k8sPodCidr: make(map[string]egressv1.IPListPair), + v4ClusterCidr: make([]string, 0), + v6ClusterCidr: make([]string, 0), + client: cli, + } + c, _ := controller.New("egressClusterInfo", mgr, + controller.Options{Reconciler: r}) + r.c = c + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.setReconciler != nil { + tc.setReconciler(r) + } + + if tc.patchFunc != nil { + patches := tc.patchFunc(r) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + ctx := context.TODO() + + _, err := r.listNodeIPs(ctx) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_eciReconciler_getNodeIPs(t *testing.T) { + cases := map[string]struct { + setReconciler func(*eciReconciler) + patchFunc func(*eciReconciler) []gomonkey.Patches + expErr bool + }{ + "failed Get": { + patchFunc: mock_Reconciler_reconcileNode_getNodeIPs_Get_err, + expErr: true, + }, + } + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + + cli := builder.Build() + + mgrOpts := manager.Options{ + Scheme: schema.GetScheme(), + NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + return cli, nil + }, + } + + mgr, _ := ctrl.NewManager(&rest.Config{}, mgrOpts) + + r := &eciReconciler{ + mgr: mgr, + eci: new(egressv1.EgressClusterInfo), + log: logr.Logger{}, + k8sPodCidr: make(map[string]egressv1.IPListPair), + v4ClusterCidr: make([]string, 0), + v6ClusterCidr: make([]string, 0), + client: cli, + } + c, _ := controller.New("egressClusterInfo", mgr, + controller.Options{Reconciler: r}) + r.c = c + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.setReconciler != nil { + tc.setReconciler(r) + } + + if tc.patchFunc != nil { + patches := tc.patchFunc(r) + defer func() { + for _, p := range patches { + 
p.Reset()
+					}
+				}()
+			}
+
+			ctx := context.TODO()
+
+			_, err := r.getNodeIPs(ctx, "fakeName")
+			if tc.expErr {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+func Test_eciReconciler_checkCalicoExists(t *testing.T) {
+
+	builder := fake.NewClientBuilder()
+	builder.WithScheme(schema.GetScheme())
+
+	cli := builder.Build()
+
+	mgrOpts := manager.Options{
+		Scheme: schema.GetScheme(),
+		NewClient: func(config *rest.Config, options client.Options) (client.Client, error) {
+			return cli, nil
+		},
+	}
+
+	mgr, _ := ctrl.NewManager(&rest.Config{}, mgrOpts)
+
+	r := &eciReconciler{
+		mgr:           mgr,
+		eci:           new(egressv1.EgressClusterInfo),
+		log:           logr.Logger{},
+		k8sPodCidr:    make(map[string]egressv1.IPListPair),
+		v4ClusterCidr: make([]string, 0),
+		v6ClusterCidr: make([]string, 0),
+		client:        cli,
+	}
+	c, _ := controller.New("egressClusterInfo", mgr,
+		controller.Options{Reconciler: r})
+	r.c = c
+
+	t.Run("isWatchCalico is false", func(t *testing.T) {
+		r.isWatchCalico.Store(false)
+		r.checkCalicoExists()
+	})
+	t.Run("failed listCalicoIPPools", func(t *testing.T) {
+		r.isWatchCalico.Store(true)
+
+		var patches []gomonkey.Patches
+		defer func() {
+			for _, p := range patches {
+				p.Reset()
+			}
+		}()
+
+		go func() {
+			patch1 := gomonkey.ApplyPrivateMethod(r, "listCalicoIPPools", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) {
+				return nil, ErrForMock
+			})
+
+			time.Sleep(time.Second * 3)
+			patch2 := gomonkey.ApplyPrivateMethod(r, "listCalicoIPPools", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) {
+				return nil, nil
+			})
+			patches = append(patches, *patch2)
+
+			patch3 := gomonkey.ApplyFuncReturn(watchSource, nil)
+			patches = append(patches, *patch3)
+
+			patch1.Reset()
+		}()
+
+		time.Sleep(time.Second)
+
+		r.checkCalicoExists()
+
+	})
+
+	t.Run("failed watchSource", func(t *testing.T) {
+		r.isWatchCalico.Store(true)
+
+		var patches []gomonkey.Patches
+		defer func() {
+			for _, p := range patches {
+				p.Reset()
+			}
+		}()
+
+		go func() {
+			patch1 := gomonkey.ApplyPrivateMethod(r, "listCalicoIPPools", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) {
+				return nil, nil
+			})
+			patches = append(patches, *patch1)
+
+			patch2 := gomonkey.ApplyFuncReturn(watchSource, ErrForMock)
+			time.Sleep(time.Second * 3)
+
+			patch3 := gomonkey.ApplyFuncReturn(watchSource, nil)
+			patches = append(patches, *patch3)
+
+			patch2.Reset()
+		}()
+
+		time.Sleep(time.Second)
+
+		r.checkCalicoExists()
+
+	})
+
+}
+
+func Test_eciReconciler_getServiceClusterIPRange(t *testing.T) {
+
+	builder := fake.NewClientBuilder()
+	builder.WithScheme(schema.GetScheme())
+
+	cli := builder.Build()
+
+	mgrOpts := manager.Options{
+		Scheme: schema.GetScheme(),
+		NewClient: func(config *rest.Config, options client.Options) (client.Client, error) {
+			return cli, nil
+		},
+	}
+
+	mgr, _ := ctrl.NewManager(&rest.Config{}, mgrOpts)
+
+	r := &eciReconciler{
+		mgr:           mgr,
+		eci:           new(egressv1.EgressClusterInfo),
+		log:           logr.Logger{},
+		k8sPodCidr:    make(map[string]egressv1.IPListPair),
+		v4ClusterCidr: make([]string, 0),
+		v6ClusterCidr: make([]string, 0),
+		client:        cli,
+	}
+	c, _ := controller.New("egressClusterInfo", mgr,
+		controller.Options{Reconciler: r})
+	r.c = c
+
+	t.Run("failed GetPodByLabel", func(t *testing.T) {
+		patch := gomonkey.ApplyFuncReturn(GetPodByLabel, nil, ErrForMock)
+		defer patch.Reset()
+		_, _, err := r.getServiceClusterIPRange()
+		assert.Error(t, err)
+	})
+}
+
+func Test_eciReconciler_checkSomeCniExists(t *testing.T) {
+	cases := map[string]struct {
+		
setReconciler func(*eciReconciler) + patchFunc func(*eciReconciler) []gomonkey.Patches + expErr bool + }{ + "failed listCalicoIPPools": { + patchFunc: mock_Reconciler_checkSomeCniExists_listCalicoIPPools_err, + expErr: true, + }, + "failed watchSource": { + patchFunc: mock_Reconciler_checkSomeCniExists_watchSource_err, + expErr: true, + }, + "succeeded watchSource": { + patchFunc: mock_Reconciler_checkSomeCniExists_watchSource_succ, + }, + + "failed getK8sPodCidr": { + patchFunc: mock_Reconciler_checkSomeCniExists_getK8sPodCidr_err, + expErr: true, + }, + + "succeeded getK8sPodCidr": { + patchFunc: mock_Reconciler_checkSomeCniExists_getK8sPodCidr_succ, + }, + } + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + + cli := builder.Build() + + mgrOpts := manager.Options{ + Scheme: schema.GetScheme(), + NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + return cli, nil + }, + } + + mgr, _ := ctrl.NewManager(&rest.Config{}, mgrOpts) + + r := &eciReconciler{ + mgr: mgr, + eci: new(egressv1.EgressClusterInfo), + log: logr.Logger{}, + k8sPodCidr: make(map[string]egressv1.IPListPair), + v4ClusterCidr: make([]string, 0), + v6ClusterCidr: make([]string, 0), + client: cli, + } + c, _ := controller.New("egressClusterInfo", mgr, + controller.Options{Reconciler: r}) + r.c = c + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.setReconciler != nil { + tc.setReconciler(r) + } + + if tc.patchFunc != nil { + patches := tc.patchFunc(r) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + err := r.checkSomeCniExists() + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} +func Test_watchSource(t *testing.T) { + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + + cli := builder.Build() + + mgr, _ := ctrl.NewManager(&rest.Config{}, manager.Options{}) + + r := &eciReconciler{ + mgr: mgr, + eci: new(egressv1.EgressClusterInfo), + log: logr.Logger{}, + k8sPodCidr: make(map[string]egressv1.IPListPair), + v4ClusterCidr: make([]string, 0), + v6ClusterCidr: make([]string, 0), + client: cli, + } + c, _ := controller.New("egressClusterInfo", mgr, + controller.Options{Reconciler: r}) + r.c = c + + t.Run("failed Watch", func(t *testing.T) { + patch := gomonkey.ApplyMethodReturn(c, "Watch", ErrForMock) + defer patch.Reset() + err := watchSource(c, source.Kind(mgr.GetCache(), &egressv1.EgressClusterInfo{}), kindEGCI) + assert.Error(t, err) + }) +} + +func mock_eciReconciler_info_AutoDetect_PodCidrMode_not_calico(r *eciReconciler) { + r.eci.Spec.AutoDetect.PodCidrMode = egressv1.CniTypeK8s +} + +func mock_eciReconciler_info_AutoDetect_PodCidrMode_calico(r *eciReconciler) { + r.eci.Spec.AutoDetect.PodCidrMode = egressv1.CniTypeCalico +} + +func mock_request_calico() reconcile.Request { + return reconcile.Request{NamespacedName: types.NamespacedName{Namespace: kindCalicoIPPool + "/", Name: "xxx"}} +} + +func mock_request_no_match() reconcile.Request { + return reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "notMatch" + "/", Name: "xxx"}} +} + +func mock_eciReconciler_getEgressClusterInfo_not_err(r *eciReconciler) []gomonkey.Patches { + patch := gomonkey.ApplyPrivateMethod(r, "getEgressClusterInfo", func(_ *eciReconciler) error { + return nil + }) + return []gomonkey.Patches{*patch} +} + +func mock_Reconciler_Reconcile_failed_Update(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(r, 
"getEgressClusterInfo", func(_ *eciReconciler) error { + return nil + }) + patch2 := gomonkey.ApplyFuncReturn(reflect.DeepEqual, false) + patch3 := gomonkey.ApplyPrivateMethod(r, "reconcileCalicoIPPool", func(_ *eciReconciler) error { + return nil + }) + patch4 := gomonkey.ApplyFuncReturn(apierrors.IsConflict, true) + return []gomonkey.Patches{*patch1, *patch2, *patch3, *patch4} +} + +func mock_Reconciler_reconcileEgressClusterInfo_watchSource_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(watchSource, ErrForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_eciReconciler_info_isWatchingNode_false(r *eciReconciler) { + r.eci.Spec.AutoDetect.NodeIP = true + r.isWatchingNode.Store(false) +} + +func mock_Reconciler_reconcileEgressClusterInfo_listNodeIPs_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(watchSource, nil) + patch2 := gomonkey.ApplyPrivateMethod(r, "listNodeIPs", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, ErrForMock + }) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_Reconciler_reconcileEgressClusterInfo_listNodeIPs_succ(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(watchSource, nil) + patch2 := gomonkey.ApplyPrivateMethod(r, "listNodeIPs", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, nil + }) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_eciReconciler_info_need_stopCheckCalico(r *eciReconciler) { + r.eci.Spec.AutoDetect.PodCidrMode = egressv1.CniTypeEmpty + r.isWatchCalico.Store(true) +} + +func mock_eciReconciler_info_AutoDetect_PodCidrMode_auto(r *eciReconciler) { + r.eci.Spec.AutoDetect.PodCidrMode = egressv1.CniTypeAuto +} + +func mock_Reconciler_reconcileEgressClusterInfo_checkSomeCniExists_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(r, "checkSomeCniExists", func(_ *eciReconciler) error { + return ErrForMock + }) + return []gomonkey.Patches{*patch1} +} + +func mock_eciReconciler_info_AutoDetect_calico_isWatchingCalico_false(r *eciReconciler) { + r.isWatchingCalico.Store(false) + r.eci.Spec.AutoDetect.PodCidrMode = egressv1.CniTypeCalico +} + +func mock_eciReconciler_info_AutoDetect_calico_isWatchingCalico_true(r *eciReconciler) { + r.isWatchingCalico.Store(true) + r.eci.Spec.AutoDetect.PodCidrMode = egressv1.CniTypeCalico +} + +func mock_Reconciler_reconcileEgressClusterInfo_listCalicoIPPools_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(r, "listCalicoIPPools", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, ErrForMock + }) + return []gomonkey.Patches{*patch1} +} + +func mock_eciReconciler_info_AutoDetect_ClusterIP(r *eciReconciler) { + r.eci.Spec.AutoDetect.ClusterIP = true +} + +func mock_Reconciler_reconcileEgressClusterInfo_getServiceClusterIPRange_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(r, "getServiceClusterIPRange", func(_ *eciReconciler) (ipv4Range, ipv6Range []string, err error) { + return nil, nil, ErrForMock + }) + return []gomonkey.Patches{*patch1} +} + +func mock_Reconciler_reconcileCalicoIPPool_Get_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", ErrForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_Reconciler_reconcileCalicoIPPool_getCalicoIPPools_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", nil) + 
patch2 := gomonkey.ApplyPrivateMethod(r, "getCalicoIPPools", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, ErrForMock + }) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_Reconciler_reconcileNode_Get_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", ErrForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_Reconciler_reconcileNode_getNodeIPs_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", nil) + patch2 := gomonkey.ApplyPrivateMethod(r, "getNodeIPs", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, ErrForMock + }) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_Reconciler_listCalicoIPPools_List_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "List", ErrForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_Reconciler_listCalicoIPPools_IsIPv4Cidr_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(ip.IsIPv4Cidr, false, ErrForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_Reconciler_listCalicoIPPools_IsIPv6Cidr_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(ip.IsIPv6Cidr, false, ErrForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_Reconciler_reconcileNode_getCalicoIPPools_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", ErrForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_calicoIPPoolV4() []client.Object { + pool := &calicov1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ippool-v4", + }, + Spec: calicov1.IPPoolSpec{ + // CIDR: "xxx", + CIDR: "10.10.0.0/18", + }, + } + return []client.Object{pool} +} + +func mock_calicoIPPoolV6() []client.Object { + pool := &calicov1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ippool-v6", + }, + Spec: calicov1.IPPoolSpec{ + CIDR: "fdee:120::/120", + }, + } + return []client.Object{pool} + +} + +func mock_IPPoolList() []client.Object { + calicoIPPoolV4 := &calicov1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ippool-v4", + }, + Spec: calicov1.IPPoolSpec{ + // CIDR: "xxx", + CIDR: "10.10.0.0/18", + }, + } + calicoIPPoolV6 := &calicov1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ippool-v6", + }, + Spec: calicov1.IPPoolSpec{ + CIDR: "fdee:120::/120", + }, + } + return []client.Object{calicoIPPoolV4, calicoIPPoolV6} +} + +func mock_Reconciler_listNodeIPs_List_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "List", ErrForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_Reconciler_reconcileNode_getNodeIPs_Get_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", ErrForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_Reconciler_checkSomeCniExists_listCalicoIPPools_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(r, "listCalicoIPPools", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, ErrForMock + }) + return []gomonkey.Patches{*patch1} +} + +func mock_Reconciler_checkSomeCniExists_watchSource_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(r, "listCalicoIPPools", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, nil + }) + patch2 := gomonkey.ApplyFuncReturn(watchSource, ErrForMock) + return 
[]gomonkey.Patches{*patch1, *patch2} +} + +func mock_Reconciler_checkSomeCniExists_watchSource_succ(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(r, "listCalicoIPPools", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, nil + }) + patch2 := gomonkey.ApplyFuncReturn(watchSource, nil) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_Reconciler_checkSomeCniExists_getK8sPodCidr_err(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(r, "listCalicoIPPools", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, ErrForMock + }) + patch2 := gomonkey.ApplyPrivateMethod(r, "getK8sPodCidr", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, ErrForMock + }) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_Reconciler_checkSomeCniExists_getK8sPodCidr_succ(r *eciReconciler) []gomonkey.Patches { + patch1 := gomonkey.ApplyPrivateMethod(r, "listCalicoIPPools", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, ErrForMock + }) + patch2 := gomonkey.ApplyPrivateMethod(r, "getK8sPodCidr", func(_ *eciReconciler) (map[string]egressv1.IPListPair, error) { + return nil, nil + }) + return []gomonkey.Patches{*patch1, *patch2} +} diff --git a/pkg/controller/egress_cluster_info/tools_test.go b/pkg/controller/egress_cluster_info/tools_test.go new file mode 100644 index 000000000..f96675c1c --- /dev/null +++ b/pkg/controller/egress_cluster_info/tools_test.go @@ -0,0 +1,23 @@ +// Copyright 2022 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +package egressclusterinfo + +import ( + "testing" + + "github.com/agiledragon/gomonkey/v2" + "github.com/stretchr/testify/assert" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func Test_GetPodByLabel(t *testing.T) { + t.Run("failed List", func(t *testing.T) { + c := fake.NewFakeClient() + patch := gomonkey.ApplyMethodReturn(c, "List", ErrForMock) + defer patch.Reset() + + _, err := GetPodByLabel(c, map[string]string{"test": "GetPodByLabel"}) + assert.Error(t, err) + }) +} diff --git a/pkg/controller/endpoint/cluster_endpoint_slice_test.go b/pkg/controller/endpoint/cluster_endpoint_slice_test.go index 69909308f..a3cb44be9 100644 --- a/pkg/controller/endpoint/cluster_endpoint_slice_test.go +++ b/pkg/controller/endpoint/cluster_endpoint_slice_test.go @@ -5,19 +5,27 @@ package endpoint import ( "context" + "errors" + "testing" + "time" + + "github.com/agiledragon/gomonkey/v2" + "github.com/go-logr/logr" + "github.com/spidernet-io/egressgateway/pkg/coalescing" "github.com/spidernet-io/egressgateway/pkg/config" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" - "testing" "github.com/spidernet-io/egressgateway/pkg/k8s/apis/v1beta1" "github.com/spidernet-io/egressgateway/pkg/logger" "github.com/spidernet-io/egressgateway/pkg/schema" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/event" @@ -582,3 +590,1502 @@ func TestNewEgressClusterEpSliceController(t *testing.T) { t.Fatal(err) } } + +var errForMock = errors.New("mock err") + +func Test_NewEgressClusterEpSliceController(t *testing.T) { + cases 
:= map[string]struct {
+		patchFun func(*testing.T, reconcile.Reconciler, manager.Manager, logr.Logger) []gomonkey.Patches
+		expErr   bool
+	}{
+		"failed NewRequestCache": {
+			patchFun: mock_NewEgressClusterEpSliceController_NewRequestCache_err,
+			expErr:   true,
+		},
+		"failed New controller": {
+			patchFun: mock_NewEgressClusterEpSliceController_New_err,
+			expErr:   true,
+		},
+
+		"failed controller watch pod": {
+			patchFun: mock_NewEgressClusterEpSliceController_Watch_pod_err,
+			expErr:   true,
+		},
+
+		"failed controller watch namespace": {
+			patchFun: mock_NewEgressClusterEpSliceController_Watch_namespace_err,
+			expErr:   true,
+		},
+		"failed controller watch egressClusterPolicy": {
+			patchFun: mock_NewEgressClusterEpSliceController_Watch_clusterpolicy_err,
+			expErr:   true,
+		},
+		"failed controller watch egressClusterEndpointSlice": {
+			patchFun: mock_NewEgressClusterEpSliceController_Watch_clusterendpointslice_err,
+			expErr:   true,
+		},
+	}
+
+	for name, tc := range cases {
+		t.Run(name, func(t *testing.T) {
+			var initialObjects []client.Object
+
+			builder := fake.NewClientBuilder()
+			builder.WithScheme(schema.GetScheme())
+			builder.WithObjects(initialObjects...)
+			cli := builder.Build()
+
+			mgrOpts := manager.Options{
+				Scheme: schema.GetScheme(),
+				NewClient: func(config *rest.Config, options client.Options) (client.Client, error) {
+					return cli, nil
+				},
+			}
+
+			cfg := &config.Config{
+				KubeConfig: &rest.Config{},
+				FileConfig: config.FileConfig{
+					MaxNumberEndpointPerSlice: 100,
+					IPTables: config.IPTables{
+						RefreshIntervalSecond:   90,
+						PostWriteIntervalSecond: 1,
+						LockTimeoutSecond:       0,
+						LockProbeIntervalMillis: 50,
+						LockFilePath:            "/run/xtables.lock",
+						RestoreSupportsLock:     true,
+					},
+					Mark: "0x26000000",
+					GatewayFailover: config.GatewayFailover{
+						Enable:              true,
+						TunnelMonitorPeriod: 5,
+						TunnelUpdatePeriod:  5,
+						EipEvictionTimeout:  15,
+					},
+				},
+			}
+			log := logger.NewLogger(cfg.EnvConfig.Logger)
+			mgr, err := ctrl.NewManager(cfg.KubeConfig, mgrOpts)
+			if err != nil {
+				t.Fatal(err)
+			}
+			r := &endpointClusterReconciler{
+				client: mgr.GetClient(),
+				log:    log,
+				config: cfg,
+			}
+
+			// patch
+			if tc.patchFun != nil {
+				patches := tc.patchFun(t, r, mgr, log)
+				defer func() {
+					for _, p := range patches {
+						p.Reset()
+					}
+				}()
+			}
+
+			err = NewEgressClusterEpSliceController(mgr, log, cfg)
+
+			if tc.expErr {
+				assert.Error(t, err)
+			}
+		})
+	}
+}
+
+func Test_Reconcile(t *testing.T) {
+	cases := map[string]struct {
+		setMaxNumberEndpointPerSlice func(*endpointClusterReconciler)
+		setObjs                      func() ([]client.Object, reconcile.Request)
+		patchFun                     func(*testing.T, *endpointClusterReconciler, manager.Manager, logr.Logger) []gomonkey.Patches
+		expErr                       bool
+	}{
+		"failed Get clusterPolicy": {
+			patchFun: mock_endpointClusterReconciler_Reconcile_Get_err,
+			expErr:   true,
+		},
+		"failed Get clusterPolicy notFound": {
+			patchFun: mock_endpointClusterReconciler_Reconcile_Get_err_notFound,
+		},
+
+		"failed listPodsByClusterPolicy": {
+			patchFun: mock_listPodsByClusterPolicy_err,
+			expErr:   true,
+		},
+
+		"failed listClusterEndpointSlices": {
+			patchFun: mock_listClusterEndpointSlices_err,
+			expErr:   true,
+		},
+
+		"needUpdateEndpoint true": {
+			setObjs:  mock_ClusterPolicyObjs,
+			patchFun: mock_needUpdateEndpoint_true,
+		},
+
+		"need to CreateEgressEndpointSlice": {
+			setObjs: mock_ClusterPolicyObjs_need_create_endpoint,
+		},
+		"need to CreateEgressEndpointSlice less count": {
+			setMaxNumberEndpointPerSlice: mock_MaxNumberEndpointPerSlice_less_count,
+			setObjs: 
mock_ClusterPolicyObjs_need_create_endpoint, + }, + + "need to CreateEgressEndpointSlice but sliceNotChange": { + setObjs: mock_ClusterPolicyObjs_not_change, + }, + + "need to delete endpoint": { + setObjs: mock_ClusterPolicyObjs_need_delete_endpoint, + }, + + "failed to update endpoint": { + setObjs: mock_ClusterPolicyObjs, + patchFun: mock_endpointClusterReconciler_Reconcile_Update_err, + expErr: true, + }, + + "failed to create endpoint": { + setObjs: mock_ClusterPolicyObjs_no_endpoint, + patchFun: mock_endpointClusterReconciler_Reconcile_Create_err, + expErr: true, + }, + + "failed to delete endpoint": { + setObjs: mock_ClusterPolicyObjs_need_delete_endpoint, + patchFun: mock_endpointClusterReconciler_Reconcile_Delete_err, + expErr: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var initialObjects []client.Object + var req reconcile.Request + if tc.setObjs != nil { + initialObjects, req = tc.setObjs() + } + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + builder.WithObjects(initialObjects...) + cli := builder.Build() + + mgrOpts := manager.Options{ + Scheme: schema.GetScheme(), + NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + return cli, nil + }, + } + + cfg := &config.Config{ + KubeConfig: &rest.Config{}, + FileConfig: config.FileConfig{ + MaxNumberEndpointPerSlice: 100, + IPTables: config.IPTables{ + RefreshIntervalSecond: 90, + PostWriteIntervalSecond: 1, + LockTimeoutSecond: 0, + LockProbeIntervalMillis: 50, + LockFilePath: "/run/xtables.lock", + RestoreSupportsLock: true, + }, + Mark: "0x26000000", + GatewayFailover: config.GatewayFailover{ + Enable: true, + TunnelMonitorPeriod: 5, + TunnelUpdatePeriod: 5, + EipEvictionTimeout: 15, + }, + }, + } + + log := logger.NewLogger(cfg.EnvConfig.Logger) + mgr, err := ctrl.NewManager(cfg.KubeConfig, mgrOpts) + if err != nil { + t.Fatal(err) + } + r := &endpointClusterReconciler{ + client: mgr.GetClient(), + log: log, + config: cfg, + } + + if tc.setMaxNumberEndpointPerSlice != nil { + tc.setMaxNumberEndpointPerSlice(r) + } + + // patch + if tc.patchFun != nil { + patches := tc.patchFun(t, r, mgr, log) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + _, err = r.Reconcile(context.TODO(), req) + + if tc.expErr { + assert.Error(t, err) + } + }) + } +} + +func Test_listPodsByClusterPolicy(t *testing.T) { + cases := map[string]struct { + setObjs func() []client.Object + setParams func() *v1beta1.EgressClusterPolicy + patchFun func(c client.Client) []gomonkey.Patches + expErr bool + }{ + "failed LabelSelectorAsSelector when nil namespaceSelector": { + setParams: mock_listPodsByClusterPolicy_nil_NamespaceSelector, + patchFun: mock_listPodsByClusterPolicy_LabelSelectorAsSelector_err, + expErr: true, + }, + + "failed List when nil namespaceSelector": { + setParams: mock_listPodsByClusterPolicy_nil_NamespaceSelector, + patchFun: mock_listPodsByClusterPolicy_List_err, + expErr: true, + }, + + "failed LabelSelectorAsSelector when not nil namespaceSelector": { + setParams: mock_listPodsByClusterPolicy_not_nil_NamespaceSelector, + patchFun: mock_listPodsByClusterPolicy_LabelSelectorAsSelector_err, + expErr: true, + }, + + "failed List when not nil namespaceSelector": { + setParams: mock_listPodsByClusterPolicy_not_nil_NamespaceSelector, + patchFun: mock_listPodsByClusterPolicy_List_err, + expErr: true, + }, + + "failed LabelSelectorAsSelector when not nil namespaceSelector second": { + setObjs: 
mock_listPodsByClusterPolicy_Objs, + setParams: mock_listPodsByClusterPolicy_not_nil_NamespaceSelector, + patchFun: mock_listPodsByClusterPolicy_LabelSelectorAsSelector_err_second, + expErr: true, + }, + + "succeeded listPodsByClusterPolicy": { + setObjs: mock_listPodsByClusterPolicy_Objs, + setParams: mock_listPodsByClusterPolicy_not_nil_NamespaceSelector, + // patchFun: mock_listPodsByClusterPolicy_List_err_second, + // expErr: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + if tc.setObjs != nil { + builder.WithObjects(tc.setObjs()...) + builder.WithStatusSubresource(tc.setObjs()...) + } + cli := builder.Build() + + // patch + if tc.patchFun != nil { + patches := tc.patchFun(cli) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + policy := tc.setParams() + _, err := listPodsByClusterPolicy(context.TODO(), cli, policy) + + if tc.expErr { + assert.Error(t, err) + } + }) + } +} + +func Test_listClusterEndpointSlices(t *testing.T) { + t.Run("failed to LabelSelectorAsSelector", func(t *testing.T) { + p := gomonkey.ApplyFuncReturn(metav1.LabelSelectorAsSelector, nil, errForMock) + defer p.Reset() + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + cli := builder.Build() + _, err := listClusterEndpointSlices(context.TODO(), cli, "testPolicy") + assert.Error(t, err) + }) +} + +func Test_Update(t *testing.T) { + cases := map[string]struct { + in event.UpdateEvent + res bool + }{ + "ObjectOld not Namespace": { + in: event.UpdateEvent{}, + }, + "ObjectNew not Namespace": { + in: event.UpdateEvent{ + ObjectOld: &corev1.Namespace{}, + }, + }, + "ObjectNew Namespace label equal": { + in: event.UpdateEvent{ + ObjectOld: &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + }, + ObjectNew: &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + p := nsPredicate{} + + ok := p.Update(tc.in) + if tc.res { + assert.True(t, ok) + } else { + assert.False(t, ok) + } + }) + } +} + +func Test_Generic(t *testing.T) { + t.Run("test Generic", func(t *testing.T) { + p := nsPredicate{} + e := event.GenericEvent{} + res := p.Generic(e) + assert.True(t, res) + }) +} + +func Test_enqueueNS(t *testing.T) { + cases := map[string]struct { + in client.Object + objs []client.Object + patchFun func(c client.Client) []gomonkey.Patches + expErr bool + }{ + "failed not namespace obj": { + in: &corev1.Pod{}, + patchFun: mock_enqueueNS_List_err, + expErr: true, + }, + "failed List": { + in: &corev1.Namespace{}, + patchFun: mock_enqueueNS_List_err, + expErr: true, + }, + "failed LabelSelectorAsSelector": { + in: &corev1.Namespace{}, + objs: []client.Object{ + &v1beta1.EgressClusterPolicy{}, + }, + patchFun: mock_enqueueNS_LabelSelectorAsSelector_err, + expErr: true, + }, + "succeeded enqueueNS": { + in: &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + }, + objs: []client.Object{ + &v1beta1.EgressClusterPolicy{ + Spec: v1beta1.EgressClusterPolicySpec{ + AppliedTo: v1beta1.ClusterAppliedTo{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + builder := fake.NewClientBuilder() + 
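+			// schema.GetScheme() registers the egressgateway CRD types alongside the
+			// built-in Kubernetes types, so the fake client can decode both the
+			// EgressClusterPolicy under test and the namespace fixtures.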
builder.WithScheme(schema.GetScheme()) + if tc.objs != nil { + builder.WithObjects(tc.objs...) + builder.WithStatusSubresource(tc.objs...) + } + cli := builder.Build() + + // patch + if tc.patchFun != nil { + patches := tc.patchFun(cli) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + resF := enqueueNS(cli) + res := resF(context.TODO(), tc.in) + + if tc.expErr { + assert.Nil(t, res) + } + }) + } +} + +func Test_enqueueEGCP(t *testing.T) { + cases := map[string]struct { + in client.Object + objs []client.Object + patchFun func(c client.Client) []gomonkey.Patches + expErr bool + }{ + "failed not pod obj": { + in: &corev1.Node{}, + // patchFun: mock_enqueueNS_List_err, + expErr: true, + }, + + "failed to list policy": { + in: &corev1.Pod{}, + patchFun: mock_enqueueEGCP_List_err_one, + expErr: true, + }, + "failed to list namespace": { + in: &corev1.Pod{}, + patchFun: mock_enqueueEGCP_List_err_two, + expErr: true, + }, + "failed to Get namespace": { + in: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "testns", + Labels: map[string]string{"app": "testpod"}, + }, + }, + objs: []client.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testns", + Labels: map[string]string{"name": "testns"}, + }, + }, + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpolicy", + Namespace: "testns", + }, + Spec: v1beta1.EgressClusterPolicySpec{ + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "testpod"}, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "testns"}, + }, + }, + }, + }, + }, + patchFun: mock_enqueueEGCP_Get_err, + expErr: true, + }, + + "failed to LabelSelectorAsSelector first": { + in: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "testns", + Labels: map[string]string{"app": "testpod"}, + }, + }, + objs: []client.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testns", + Labels: map[string]string{"name": "testns"}, + }, + }, + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpolicy", + Namespace: "testns", + }, + Spec: v1beta1.EgressClusterPolicySpec{ + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "testpod"}, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "testns"}, + }, + }, + }, + }, + }, + patchFun: mock_enqueueEGCP_LabelSelectorAsSelector_err_first, + expErr: true, + }, + + "failed to LabelSelectorAsSelector second": { + in: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "testns", + Labels: map[string]string{"app": "testpod"}, + }, + }, + objs: []client.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testns", + Labels: map[string]string{"name": "testns"}, + }, + }, + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpolicy", + Namespace: "testns", + }, + Spec: v1beta1.EgressClusterPolicySpec{ + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "testpod"}, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "testns"}, + }, + }, + }, + }, + }, + patchFun: mock_enqueueEGCP_LabelSelectorAsSelector_err_second, + expErr: true, + }, + + "ns not match enqueueEGCP": { + in: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: "testpod", + Namespace: "testns1", + Labels: map[string]string{"app": "testpod"}, + }, + }, + objs: []client.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testns1", + Labels: map[string]string{"name": "testns1"}, + }, + }, + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpolicy", + }, + Spec: v1beta1.EgressClusterPolicySpec{ + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "testpod"}, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "testns3"}, + }, + }, + }, + }, + }, + }, + + "succeeded enqueueEGCP": { + in: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "testns", + Labels: map[string]string{"app": "testpod"}, + }, + }, + objs: []client.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testns1", + Labels: map[string]string{"name": "testns1"}, + }, + }, + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testns", + Labels: map[string]string{"name": "testns"}, + }, + }, + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpolicy", + Namespace: "testns", + }, + Spec: v1beta1.EgressClusterPolicySpec{ + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "testpod"}, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "testns"}, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + if tc.objs != nil { + builder.WithObjects(tc.objs...) + builder.WithStatusSubresource(tc.objs...) 
+ } + cli := builder.Build() + + // patch + if tc.patchFun != nil { + patches := tc.patchFun(cli) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + resF := enqueueEGCP(cli) + res := resF(context.TODO(), tc.in) + + if tc.expErr { + assert.Nil(t, res) + } + }) + } +} + +func mock_NewEgressClusterEpSliceController_NewRequestCache_err(t *testing.T, r reconcile.Reconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(coalescing.NewRequestCache, nil, errForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_NewEgressClusterEpSliceController_New_err(t *testing.T, r reconcile.Reconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(controller.New, nil, errForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_NewEgressClusterEpSliceController_Watch_pod_err(t *testing.T, r reconcile.Reconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + name := "test-controller" + cache, err := coalescing.NewRequestCache(time.Second) + assert.NoError(t, err) + reduce := coalescing.NewReconciler(r, cache, log) + + c, err := controller.New(name, mgr, controller.Options{Reconciler: reduce}) + assert.NoError(t, err) + patch1 := gomonkey.ApplyFuncReturn(controller.New, c, nil) + patch2 := gomonkey.ApplyMethodReturn(c, "Watch", errForMock) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_NewEgressClusterEpSliceController_Watch_namespace_err(t *testing.T, r reconcile.Reconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + name := "test-controller" + cache, err := coalescing.NewRequestCache(time.Second) + assert.NoError(t, err) + reduce := coalescing.NewReconciler(r, cache, log) + + c, err := controller.New(name, mgr, controller.Options{Reconciler: reduce}) + assert.NoError(t, err) + patch1 := gomonkey.ApplyFuncReturn(controller.New, c, nil) + patch2 := gomonkey.ApplyMethodSeq(c, "Watch", []gomonkey.OutputCell{ + {Values: gomonkey.Params{nil}, Times: 1}, + {Values: gomonkey.Params{errForMock}, Times: 1}, + }) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_NewEgressClusterEpSliceController_Watch_clusterpolicy_err(t *testing.T, r reconcile.Reconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + name := "test-controller" + cache, err := coalescing.NewRequestCache(time.Second) + assert.NoError(t, err) + reduce := coalescing.NewReconciler(r, cache, log) + + c, err := controller.New(name, mgr, controller.Options{Reconciler: reduce}) + assert.NoError(t, err) + patch1 := gomonkey.ApplyFuncReturn(controller.New, c, nil) + patch2 := gomonkey.ApplyMethodSeq(c, "Watch", []gomonkey.OutputCell{ + {Values: gomonkey.Params{nil}, Times: 1}, + {Values: gomonkey.Params{nil}, Times: 1}, + {Values: gomonkey.Params{errForMock}, Times: 1}, + }) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_NewEgressClusterEpSliceController_Watch_clusterendpointslice_err(t *testing.T, r reconcile.Reconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + name := "test-controller" + cache, err := coalescing.NewRequestCache(time.Second) + assert.NoError(t, err) + reduce := coalescing.NewReconciler(r, cache, log) + + c, err := controller.New(name, mgr, controller.Options{Reconciler: reduce}) + assert.NoError(t, err) + patch1 := gomonkey.ApplyFuncReturn(controller.New, c, nil) + patch2 := gomonkey.ApplyMethodSeq(c, "Watch", []gomonkey.OutputCell{ + {Values: gomonkey.Params{nil}, Times: 1}, + {Values: 
gomonkey.Params{nil}, Times: 1}, + {Values: gomonkey.Params{nil}, Times: 1}, + {Values: gomonkey.Params{errForMock}, Times: 1}, + }) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_endpointClusterReconciler_Reconcile_Get_err(t *testing.T, r *endpointClusterReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", errForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_endpointClusterReconciler_Reconcile_Get_err_notFound(t *testing.T, r *endpointClusterReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", errForMock) + patch2 := gomonkey.ApplyFuncReturn(apierrors.IsNotFound, true) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_listPodsByClusterPolicy_err(t *testing.T, r *endpointClusterReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", nil) + patch2 := gomonkey.ApplyFuncReturn(listPodsByClusterPolicy, nil, errForMock) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_listClusterEndpointSlices_err(t *testing.T, r *endpointClusterReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", nil) + patch2 := gomonkey.ApplyFuncReturn(listPodsByClusterPolicy, nil, nil) + patch3 := gomonkey.ApplyFuncReturn(listClusterEndpointSlices, nil, errForMock) + + return []gomonkey.Patches{*patch1, *patch2, *patch3} +} + +func mock_needUpdateEndpoint_true(t *testing.T, r *endpointClusterReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(needUpdateEndpoint, true) + + return []gomonkey.Patches{*patch1} +} + +func mock_ClusterPolicyObjs() ([]client.Object, reconcile.Request) { + labels := map[string]string{"app": "nginx1"} + labels2 := map[string]string{"app": "nginx2"} + policyName := "policy1" + policyName2 := "policy2" + + initialObjects := []client.Object{ + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: v1beta1.EgressClusterPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels}, + }, + DestSubnet: nil, + }, + }, + &v1beta1.EgressClusterEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s1", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "pod1", + IPv4: []string{ + "10.6.0.1", + }, + IPv6: []string{}, + Node: "", + }, + { + Namespace: "default", + Pod: "pod2", + IPv4: []string{ + "10.6.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + Labels: labels, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.1"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "default", + Labels: labels, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.2"}, + }, + }, + }, + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName2, + }, + Spec: v1beta1.EgressClusterPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: 
&metav1.LabelSelector{MatchLabels: labels2}, + }, + DestSubnet: nil, + }, + }, + &v1beta1.EgressClusterEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s2", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName2, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "pod3", + IPv4: []string{ + "10.6.0.3", + }, + IPv6: []string{}, + Node: "", + }, + { + Namespace: "default", + Pod: "pod4", + IPv4: []string{ + "10.6.0.4", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod3", + Namespace: "default", + Labels: labels2, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.3"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod4", + Namespace: "default", + Labels: labels2, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.14"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod5", + Namespace: "default", + Labels: labels2, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.5"}, + }, + }, + }, + &v1beta1.EgressClusterEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s3", + Labels: map[string]string{ + v1beta1.LabelPolicyName: "nopolicy", + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "nopod1", + IPv4: []string{ + "10.7.0.1", + }, + IPv6: []string{}, + Node: "", + }, + { + Namespace: "default", + Pod: "nopod2", + IPv4: []string{ + "10.7.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + } + return initialObjects, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "", Name: policyName}} +} + +func mock_ClusterPolicyObjs_no_endpoint() ([]client.Object, reconcile.Request) { + labels1 := map[string]string{"app": "nginx1"} + policyName := "policy1" + initialObjects := []client.Object{ + // &v1beta1.EgressClusterEndpointSlice{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: "s1", + // Labels: map[string]string{ + // v1beta1.LabelPolicyName: policyName, + // }, + // }, + // Endpoints: []v1beta1.EgressEndpoint{ + // { + // Namespace: "defaultxx", + // Pod: "pod1", + // IPv4: []string{ + // "10.6.0.1", "10.6.0.2", + // }, + // IPv6: []string{}, + // Node: "", + // }, + // }, + // }, + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: v1beta1.EgressClusterPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels1}, + }, + DestSubnet: nil, + }, + }, + + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + Labels: labels1, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.1"}, + {IP: "10.6.0.2"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "default", + Labels: labels1, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.3"}, + {IP: "10.6.0.4"}, + }, + }, + }, + } + return initialObjects, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "", Name: policyName}} +} + +func mock_ClusterPolicyObjs_need_create_endpoint() ([]client.Object, reconcile.Request) { + labels1 := map[string]string{"app": "nginx1"} + labels2 := map[string]string{"app": "nginx2"} + policyName := "policy1" + policyName2 := 
"policy2" + policyName3 := "policy3" + initialObjects := []client.Object{ + &v1beta1.EgressClusterEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s1", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "defaultxx", + Pod: "pod1", + IPv4: []string{ + "10.6.0.1", "10.6.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: v1beta1.EgressClusterPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels2}, + }, + DestSubnet: nil, + }, + }, + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName2, + }, + Spec: v1beta1.EgressClusterPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels1}, + }, + DestSubnet: nil, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + Labels: labels1, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.1"}, + {IP: "10.6.0.2"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "default", + Labels: labels1, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.3"}, + {IP: "10.6.0.4"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod3", + Namespace: "default", + Labels: labels1, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.5"}, + {IP: "10.6.0.6"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod4", + Namespace: "default", + Labels: labels2, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.5"}, + {IP: "10.6.0.6"}, + }, + }, + }, + &v1beta1.EgressClusterEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s3", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName3, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "podxxx", + IPv4: []string{ + "10.6.0.1", "10.6.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName3, + }, + Spec: v1beta1.EgressClusterPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels2}, + }, + DestSubnet: nil, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "podxxx", + Namespace: "default", + Labels: labels2, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.5"}, + {IP: "10.6.0.6"}, + }, + }, + }, + } + return initialObjects, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "", Name: policyName}} +} + +func mock_MaxNumberEndpointPerSlice_less_count(r *endpointClusterReconciler) { + r.config.FileConfig.MaxNumberEndpointPerSlice = 1 +} + +func mock_ClusterPolicyObjs_not_change() ([]client.Object, reconcile.Request) { + labels := map[string]string{"app": "nginx1"} + policyName := "policy1" + + initialObjects := []client.Object{ + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: v1beta1.EgressClusterPolicySpec{ + 
EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels}, + }, + DestSubnet: nil, + }, + }, + &v1beta1.EgressClusterEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s1", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "pod1", + IPv4: []string{ + "10.6.0.1", + }, + IPv6: []string{}, + Node: "", + }, + { + Namespace: "default", + Pod: "pod2", + IPv4: []string{ + "10.6.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + Labels: labels, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.1"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "default", + Labels: labels, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.2"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod3", + Namespace: "default", + Labels: labels, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.3"}, + }, + }, + }, + } + return initialObjects, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "", Name: policyName}} +} + +func mock_ClusterPolicyObjs_need_delete_endpoint() ([]client.Object, reconcile.Request) { + labels := map[string]string{"app": "nginx1"} + policyName := "policy1" + + initialObjects := []client.Object{ + &v1beta1.EgressClusterPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: v1beta1.EgressClusterPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels}, + }, + DestSubnet: nil, + }, + }, + &v1beta1.EgressClusterEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s1", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "pod1", + IPv4: []string{ + "10.6.0.1", + }, + IPv6: []string{}, + Node: "", + }, + { + Namespace: "default", + Pod: "pod2", + IPv4: []string{ + "10.6.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + } + return initialObjects, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "", Name: policyName}} +} + +func mock_endpointClusterReconciler_Reconcile_Update_err(t *testing.T, r *endpointClusterReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Update", errForMock) + patch2 := gomonkey.ApplyFuncReturn(needUpdateEndpoint, true) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_endpointClusterReconciler_Reconcile_Create_err(t *testing.T, r *endpointClusterReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Create", errForMock) + patch2 := gomonkey.ApplyFuncReturn(needUpdateEndpoint, false) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_endpointClusterReconciler_Reconcile_Delete_err(t *testing.T, r *endpointClusterReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Delete", errForMock) + patch2 := gomonkey.ApplyFuncReturn(needUpdateEndpoint, false) + return 
[]gomonkey.Patches{*patch1, *patch2} +} + +func mock_listPodsByClusterPolicy_nil_NamespaceSelector() *v1beta1.EgressClusterPolicy { + return &v1beta1.EgressClusterPolicy{ + Spec: v1beta1.EgressClusterPolicySpec{ + AppliedTo: v1beta1.ClusterAppliedTo{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + }, + } +} + +func mock_listPodsByClusterPolicy_LabelSelectorAsSelector_err(c client.Client) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(metav1.LabelSelectorAsSelector, nil, errForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_listPodsByClusterPolicy_List_err(c client.Client) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(c, "List", errForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_listPodsByClusterPolicy_not_nil_NamespaceSelector() *v1beta1.EgressClusterPolicy { + return &v1beta1.EgressClusterPolicy{ + Spec: v1beta1.EgressClusterPolicySpec{ + AppliedTo: v1beta1.ClusterAppliedTo{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "ns1"}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + }, + } +} + +func mock_listPodsByClusterPolicy_Objs() []client.Object { + + initialObjects := []client.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns1", + Labels: map[string]string{"name": "ns1"}, + }, + }, + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns2", + Labels: map[string]string{"name": "ns2"}, + }, + }, + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns3", + Labels: map[string]string{"name": "ns3"}, + }, + }, + } + return initialObjects +} + +func mock_listPodsByClusterPolicy_LabelSelectorAsSelector_err_second(c client.Client) []gomonkey.Patches { + patch := gomonkey.ApplyFuncSeq(metav1.LabelSelectorAsSelector, []gomonkey.OutputCell{ + {Values: gomonkey.Params{nil, nil}, Times: 1}, + {Values: gomonkey.Params{nil, errForMock}, Times: 1}, + }) + return []gomonkey.Patches{*patch} +} + +func mock_listPodsByClusterPolicy_List_err_second(c client.Client) []gomonkey.Patches { + patch := gomonkey.ApplyMethodSeq(c, "List", []gomonkey.OutputCell{ + {Values: gomonkey.Params{nil}, Times: 1}, + {Values: gomonkey.Params{errForMock}, Times: 1}, + }) + return []gomonkey.Patches{*patch} +} + +func mock_enqueueNS_List_err(c client.Client) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(c, "List", errForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_enqueueNS_LabelSelectorAsSelector_err(c client.Client) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(metav1.LabelSelectorAsSelector, nil, errForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_enqueueEGCP_List_err_one(c client.Client) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(c, "List", errForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_enqueueEGCP_List_err_two(c client.Client) []gomonkey.Patches { + patch := gomonkey.ApplyMethodSeq(c, "List", []gomonkey.OutputCell{ + {Values: gomonkey.Params{nil}, Times: 1}, + {Values: gomonkey.Params{errForMock}, Times: 1}, + }) + return []gomonkey.Patches{*patch} +} + +func mock_enqueueEGCP_Get_err(c client.Client) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(c, "Get", errForMock) + + return []gomonkey.Patches{*patch1} +} + +func mock_enqueueEGCP_LabelSelectorAsSelector_err_first(c client.Client) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(metav1.LabelSelectorAsSelector, nil, errForMock) + + 
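+ // gomonkey.ApplyFuncReturn replaces metav1.LabelSelectorAsSelector for the life of the patch, so every call returns (nil, errForMock) and enqueueEGCP is driven into its selector-error branch without crafting an invalid selector.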
return []gomonkey.Patches{*patch1} +} + +func mock_enqueueEGCP_LabelSelectorAsSelector_err_second(c client.Client) []gomonkey.Patches { + sel, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "testpod"}, + }) + patch := gomonkey.ApplyFuncSeq(metav1.LabelSelectorAsSelector, []gomonkey.OutputCell{ + {Values: gomonkey.Params{sel, nil}, Times: 1}, + {Values: gomonkey.Params{nil, errForMock}, Times: 1}, + }) + return []gomonkey.Patches{*patch} +} diff --git a/pkg/controller/endpoint/endpoint_slice_test.go b/pkg/controller/endpoint/endpoint_slice_test.go index 0f12bfc9c..c616249b2 100644 --- a/pkg/controller/endpoint/endpoint_slice_test.go +++ b/pkg/controller/endpoint/endpoint_slice_test.go @@ -7,14 +7,19 @@ import ( "context" "errors" "fmt" + "testing" + "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/manager" - "testing" + "github.com/agiledragon/gomonkey/v2" + "github.com/go-logr/logr" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -624,3 +629,1166 @@ func TestNewEgressEndpointSliceController(t *testing.T) { t.Fatal(err) } } + +func Test_NewEgressEndpointSliceController(t *testing.T) { + cases := map[string]struct { + patchFun func(*testing.T, reconcile.Reconciler, manager.Manager, logr.Logger) []gomonkey.Patches + expErr bool + }{ + "failed NewRequestCache": { + patchFun: mock_NewEgressClusterEpSliceController_NewRequestCache_err, + expErr: true, + }, + "failed New controller": { + patchFun: mock_NewEgressClusterEpSliceController_New_err, + expErr: true, + }, + + "failed controller watch pod": { + patchFun: mock_NewEgressClusterEpSliceController_Watch_pod_err, + expErr: true, + }, + + "failed controller watch egressPolicy": { + patchFun: mock_NewEgressClusterEpSliceController_Watch_namespace_err, + expErr: true, + }, + "failed controller watch egressEndpointSlice": { + patchFun: mock_NewEgressClusterEpSliceController_Watch_clusterpolicy_err, + expErr: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var initialObjects []client.Object + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + builder.WithObjects(initialObjects...)
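+ // the fake controller-runtime client built here is handed to the manager via mgrOpts.NewClient below, so the constructor under test never talks to a real API server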
+ cli := builder.Build() + + mgrOpts := manager.Options{ + Scheme: schema.GetScheme(), + NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + return cli, nil + }, + } + + cfg := &config.Config{ + KubeConfig: &rest.Config{}, + FileConfig: config.FileConfig{ + MaxNumberEndpointPerSlice: 100, + IPTables: config.IPTables{ + RefreshIntervalSecond: 90, + PostWriteIntervalSecond: 1, + LockTimeoutSecond: 0, + LockProbeIntervalMillis: 50, + LockFilePath: "/run/xtables.lock", + RestoreSupportsLock: true, + }, + Mark: "0x26000000", + GatewayFailover: config.GatewayFailover{ + Enable: true, + TunnelMonitorPeriod: 5, + TunnelUpdatePeriod: 5, + EipEvictionTimeout: 15, + }, + }, + } + log := logger.NewLogger(cfg.EnvConfig.Logger) + mgr, err := ctrl.NewManager(cfg.KubeConfig, mgrOpts) + if err != nil { + t.Fatal(err) + } + r := &endpointReconciler{ + client: mgr.GetClient(), + log: log, + config: cfg, + } + + // patch + if tc.patchFun != nil { + patches := tc.patchFun(t, r, mgr, log) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + err = NewEgressEndpointSliceController(mgr, log, cfg) + + if tc.expErr { + assert.Error(t, err) + } + }) + } +} + +func Test_endpointReconciler_Reconcile(t *testing.T) { + cases := map[string]struct { + setMaxNumberEndpointPerSlice func(*endpointReconciler) + setObjs func() ([]client.Object, reconcile.Request) + patchFun func(*testing.T, *endpointReconciler, manager.Manager, logr.Logger) []gomonkey.Patches + expErr bool + }{ + "failed Get policy": { + patchFun: mock_endpointReconciler_Reconcile_Get_err, + expErr: true, + }, + "failed Get policy notFound": { + patchFun: mock_endpointReconciler_Reconcile_Get_err_notFound, + }, + + "failed listPodsByPolicy": { + patchFun: mock_listPodsByPolicy_err, + expErr: true, + }, + + "failed listEndpointSlices": { + patchFun: mock_listEndpointSlices_err, + expErr: true, + }, + + "needUpdateEndpoint true": { + setObjs: mock_policyObjs, + patchFun: mock_endpointReconciler_needUpdateEndpoint_true, + }, + + "need to CreateEgressEndpointSlice": { + setObjs: mock_policyObjs_need_create_endpoint, + }, + + "need to CreateEgressEndpointSlice less count": { + setMaxNumberEndpointPerSlice: mock_endpointReconciler_MaxNumberEndpointPerSlice_less_count, + setObjs: mock_policyObjs_need_create_endpoint, + }, + + "need to CreateEgressEndpointSlice but sliceNotChange": { + setObjs: mock_policyObjs_not_change, + }, + + "need to delete endpoint": { + setObjs: mock_policyObjs_need_delete_endpoint, + }, + + "failed to update endpoint": { + setObjs: mock_policyObjs, + patchFun: mock_endpointReconciler_Reconcile_Update_err, + expErr: true, + }, + + "failed to create endpoint": { + setObjs: mock_policyObjs_no_endpoint, + patchFun: mock_endpointReconciler_Reconcile_Create_err, + expErr: true, + }, + + "failed to delete endpoint": { + setObjs: mock_policyObjs_need_delete_endpoint, + patchFun: mock_endpointReconciler_Reconcile_Delete_err, + expErr: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var initialObjects []client.Object + var req reconcile.Request + if tc.setObjs != nil { + initialObjects, req = tc.setObjs() + } + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + builder.WithObjects(initialObjects...)
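+ // seed the fake client with this case's objects; Reconcile below runs against that in-memory state and only the error expectation is asserted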
+ cli := builder.Build() + + mgrOpts := manager.Options{ + Scheme: schema.GetScheme(), + NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + return cli, nil + }, + } + + cfg := &config.Config{ + KubeConfig: &rest.Config{}, + FileConfig: config.FileConfig{ + MaxNumberEndpointPerSlice: 100, + IPTables: config.IPTables{ + RefreshIntervalSecond: 90, + PostWriteIntervalSecond: 1, + LockTimeoutSecond: 0, + LockProbeIntervalMillis: 50, + LockFilePath: "/run/xtables.lock", + RestoreSupportsLock: true, + }, + Mark: "0x26000000", + GatewayFailover: config.GatewayFailover{ + Enable: true, + TunnelMonitorPeriod: 5, + TunnelUpdatePeriod: 5, + EipEvictionTimeout: 15, + }, + }, + } + + log := logger.NewLogger(cfg.EnvConfig.Logger) + mgr, err := ctrl.NewManager(cfg.KubeConfig, mgrOpts) + if err != nil { + t.Fatal(err) + } + r := &endpointReconciler{ + client: mgr.GetClient(), + log: log, + config: cfg, + } + + if tc.setMaxNumberEndpointPerSlice != nil { + tc.setMaxNumberEndpointPerSlice(r) + } + + // patch + if tc.patchFun != nil { + patches := tc.patchFun(t, r, mgr, log) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + _, err = r.Reconcile(context.TODO(), req) + + if tc.expErr { + assert.Error(t, err) + } + }) + } +} + +func Test_getEndpointSlicePrefix(t *testing.T) { + t.Run("len(validation.NameIsDNSSubdomain(prefix, true)) is not zero", func(t *testing.T) { + p := gomonkey.ApplyFuncReturn(validation.NameIsDNSSubdomain, []string{"1", "2"}) + defer p.Reset() + getEndpointSlicePrefix("xxx") + }) +} + +func Test_newEndpoint(t *testing.T) { + t.Run("ipv6", func(t *testing.T) { + newEndpoint(corev1.Pod{ + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "fddd:dd::2"}, + }, + }, + }) + }) + t.Run("no ip", func(t *testing.T) { + newEndpoint(corev1.Pod{}) + }) +} + +func Test_needUpdateEndpoint(t *testing.T) { + t.Run("ipv6", func(t *testing.T) { + needUpdateEndpoint(corev1.Pod{ + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "fddd:dd::2"}, + }, + }, + }, &v1beta1.EgressEndpoint{}) + }) + t.Run("need update ipv4", func(t *testing.T) { + needUpdateEndpoint(corev1.Pod{ + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.10.0.2"}, + }, + }, + }, &v1beta1.EgressEndpoint{}) + }) + t.Run("need update ipv6", func(t *testing.T) { + needUpdateEndpoint(corev1.Pod{ + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "fddd:dd::2"}, + }, + }, + }, &v1beta1.EgressEndpoint{}) + }) +} + +func Test_sliceEqual(t *testing.T) { + t.Run("length not equal", func(t *testing.T) { + sliceEqual([]string{"x"}, []string{"x", "xx"}) + }) + t.Run("slice not equal", func(t *testing.T) { + sliceEqual([]string{"x1"}, []string{"x2"}) + }) +} + +func Test_initEndpoint(t *testing.T) { + t.Run("succeeded initEndpoint", func(t *testing.T) { + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + cli := builder.Build() + + mgrOpts := manager.Options{ + Scheme: schema.GetScheme(), + NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + return cli, nil + }, + } + + cfg := &config.Config{ + KubeConfig: &rest.Config{}, + FileConfig: config.FileConfig{ + MaxNumberEndpointPerSlice: 100, + IPTables: config.IPTables{ + RefreshIntervalSecond: 90, + PostWriteIntervalSecond: 1, + LockTimeoutSecond: 0, + LockProbeIntervalMillis: 50, + LockFilePath: "/run/xtables.lock", + RestoreSupportsLock: true, + }, + Mark: "0x26000000", + GatewayFailover: config.GatewayFailover{ + Enable: true, + 
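+ // the failover periods and timeout below are in seconds (an assumption based on the config.GatewayFailover field names)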
TunnelMonitorPeriod: 5, + TunnelUpdatePeriod: 5, + EipEvictionTimeout: 15, + }, + }, + } + log := logger.NewLogger(cfg.EnvConfig.Logger) + mgr, err := ctrl.NewManager(cfg.KubeConfig, mgrOpts) + if err != nil { + t.Fatal(err) + } + r := &endpointReconciler{ + client: mgr.GetClient(), + log: log, + config: cfg, + } + e := r.initEndpoint() + assert.NoError(t, e) + }) +} + +func Test_listPodsByPolicy(t *testing.T) { + cases := map[string]struct { + setObjs func() []client.Object + setParams func() *v1beta1.EgressPolicy + patchFun func(c client.Client) []gomonkey.Patches + expErr bool + }{ + "failed LabelSelectorAsSelector when nil namespaceSelector": { + setParams: mock_listPodsByPolicy, + patchFun: mock_listPodsByPolicy_LabelSelectorAsSelector_err, + expErr: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + if tc.setObjs != nil { + builder.WithObjects(tc.setObjs()...) + builder.WithStatusSubresource(tc.setObjs()...) + } + cli := builder.Build() + + // patch + if tc.patchFun != nil { + patches := tc.patchFun(cli) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + policy := tc.setParams() + _, err := listPodsByPolicy(context.TODO(), cli, policy) + + if tc.expErr { + assert.Error(t, err) + } + }) + } +} + +func Test_listEndpointSlices(t *testing.T) { + t.Run("failed to LabelSelectorAsSelector", func(t *testing.T) { + p := gomonkey.ApplyFuncReturn(metav1.LabelSelectorAsSelector, nil, errForMock) + defer p.Reset() + + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + cli := builder.Build() + _, err := listEndpointSlices(context.TODO(), cli, "testns", "testPolicy") + assert.Error(t, err) + }) +} + +func Test_podPredicate_Create(t *testing.T) { + cases := map[string]struct { + in event.CreateEvent + res bool + }{ + "createEvent not pod": { + in: event.CreateEvent{}, + }, + "pod no ip": { + in: event.CreateEvent{ + Object: &corev1.Pod{}, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + p := podPredicate{} + + ok := p.Create(tc.in) + if tc.res { + assert.True(t, ok) + } else { + assert.False(t, ok) + } + }) + } +} + +func Test_podPredicate_Update(t *testing.T) { + cases := map[string]struct { + in event.UpdateEvent + res bool + }{ + "ObjectOld not pod": { + in: event.UpdateEvent{}, + }, + "ObjectNew not pod": { + in: event.UpdateEvent{ + ObjectOld: &corev1.Pod{}, + }, + }, + "nodeName not equal": { + in: event.UpdateEvent{ + ObjectOld: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + Spec: corev1.PodSpec{ + NodeName: "node1", + }, + }, + ObjectNew: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + Spec: corev1.PodSpec{ + NodeName: "node2", + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + p := podPredicate{} + + ok := p.Update(tc.in) + if tc.res { + assert.True(t, ok) + } else { + assert.False(t, ok) + } + }) + } +} + +func Test_podPredicate_Generic(t *testing.T) { + t.Run("test Generic", func(t *testing.T) { + p := podPredicate{} + e := event.GenericEvent{} + res := p.Generic(e) + assert.True(t, res) + }) +} +func Test_enqueuePod(t *testing.T) { + cases := map[string]struct { + in client.Object + objs []client.Object + patchFun func(c client.Client) []gomonkey.Patches + expErr bool + }{ + "failed not pod obj": { + in: &corev1.Namespace{}, + expErr: true, + }, + 
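+ // the remaining cases patch the fake client so enqueuePod fails first in its List call, then in selector conversion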
"failed List": { + in: &corev1.Pod{}, + patchFun: mock_enqueuePod_List_err, + expErr: true, + }, + "failed LabelSelectorAsSelector": { + in: &corev1.Pod{}, + objs: []client.Object{ + &v1beta1.EgressPolicy{}, + }, + patchFun: mock_enqueuePod_LabelSelectorAsSelector_err, + expErr: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + builder := fake.NewClientBuilder() + builder.WithScheme(schema.GetScheme()) + if tc.objs != nil { + builder.WithObjects(tc.objs...) + builder.WithStatusSubresource(tc.objs...) + } + cli := builder.Build() + + // patch + if tc.patchFun != nil { + patches := tc.patchFun(cli) + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + resF := enqueuePod(cli) + res := resF(context.TODO(), tc.in) + + if tc.expErr { + assert.Nil(t, res) + } + }) + } +} + +func mock_endpointReconciler_Reconcile_Get_err(t *testing.T, r *endpointReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", errForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_endpointReconciler_Reconcile_Get_err_notFound(t *testing.T, r *endpointReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", errForMock) + patch2 := gomonkey.ApplyFuncReturn(apierrors.IsNotFound, true) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_listPodsByPolicy_err(t *testing.T, r *endpointReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", nil) + patch2 := gomonkey.ApplyFuncReturn(listPodsByPolicy, nil, errForMock) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_listEndpointSlices_err(t *testing.T, r *endpointReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Get", nil) + patch2 := gomonkey.ApplyFuncReturn(listPodsByPolicy, &corev1.PodList{}, nil) + patch3 := gomonkey.ApplyFuncReturn(listEndpointSlices, nil, errForMock) + + return []gomonkey.Patches{*patch1, *patch2, *patch3} +} + +func mock_policyObjs() ([]client.Object, reconcile.Request) { + labels := map[string]string{"app": "nginx1"} + labels2 := map[string]string{"app": "nginx2"} + policyName := "policy1" + policyName2 := "policy2" + + initialObjects := []client.Object{ + &v1beta1.EgressPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: v1beta1.EgressPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.AppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels}, + }, + DestSubnet: nil, + }, + }, + &v1beta1.EgressEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s1", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "pod1", + IPv4: []string{ + "10.6.0.1", + }, + IPv6: []string{}, + Node: "", + }, + { + Namespace: "default", + Pod: "pod2", + IPv4: []string{ + "10.6.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + Labels: labels, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.1"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "default", + Labels: labels, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + 
{IP: "10.6.0.2"}, + }, + }, + }, + &v1beta1.EgressPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName2, + }, + Spec: v1beta1.EgressPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.AppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels2}, + }, + DestSubnet: nil, + }, + }, + &v1beta1.EgressEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s2", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName2, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "pod3", + IPv4: []string{ + "10.6.0.3", + }, + IPv6: []string{}, + Node: "", + }, + { + Namespace: "default", + Pod: "pod4", + IPv4: []string{ + "10.6.0.4", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod3", + Namespace: "default", + Labels: labels2, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.3"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod4", + Namespace: "default", + Labels: labels2, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.14"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod5", + Namespace: "default", + Labels: labels2, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.5"}, + }, + }, + }, + &v1beta1.EgressEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s3", + Labels: map[string]string{ + v1beta1.LabelPolicyName: "nopolicy", + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "nopod1", + IPv4: []string{ + "10.7.0.1", + }, + IPv6: []string{}, + Node: "", + }, + { + Namespace: "default", + Pod: "nopod2", + IPv4: []string{ + "10.7.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + } + return initialObjects, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "", Name: policyName}} +} + +func mock_endpointReconciler_needUpdateEndpoint_true(t *testing.T, r *endpointReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(needUpdateEndpoint, true) + + return []gomonkey.Patches{*patch1} +} + +func mock_policyObjs_need_create_endpoint() ([]client.Object, reconcile.Request) { + labels1 := map[string]string{"app": "nginx1"} + labels2 := map[string]string{"app": "nginx2"} + policyName := "policy1" + policyName2 := "policy2" + policyName3 := "policy3" + initialObjects := []client.Object{ + &v1beta1.EgressEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s1", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "defaultxx", + Pod: "pod1", + IPv4: []string{ + "10.6.0.1", "10.6.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + &v1beta1.EgressPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: v1beta1.EgressPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.AppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels2}, + }, + DestSubnet: nil, + }, + }, + &v1beta1.EgressPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName2, + }, + Spec: v1beta1.EgressPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.AppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels1}, + }, + DestSubnet: nil, + }, + }, + &corev1.Pod{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + Labels: labels1, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.1"}, + {IP: "10.6.0.2"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "default", + Labels: labels1, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.3"}, + {IP: "10.6.0.4"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod3", + Namespace: "default", + Labels: labels1, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.5"}, + {IP: "10.6.0.6"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod4", + Namespace: "default", + Labels: labels2, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.5"}, + {IP: "10.6.0.6"}, + }, + }, + }, + &v1beta1.EgressEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s3", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName3, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "podxxx", + IPv4: []string{ + "10.6.0.1", "10.6.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + &v1beta1.EgressPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName3, + }, + Spec: v1beta1.EgressPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.AppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels2}, + }, + DestSubnet: nil, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "podxxx", + Namespace: "default", + Labels: labels2, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.5"}, + {IP: "10.6.0.6"}, + }, + }, + }, + } + return initialObjects, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "", Name: policyName}} +} + +func mock_endpointReconciler_MaxNumberEndpointPerSlice_less_count(r *endpointReconciler) { + r.config.FileConfig.MaxNumberEndpointPerSlice = 1 +} + +func mock_policyObjs_not_change() ([]client.Object, reconcile.Request) { + labels := map[string]string{"app": "nginx1"} + policyName := "policy1" + + initialObjects := []client.Object{ + &v1beta1.EgressPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: v1beta1.EgressPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.AppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels}, + }, + DestSubnet: nil, + }, + }, + &v1beta1.EgressEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s1", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "pod1", + IPv4: []string{ + "10.6.0.1", + }, + IPv6: []string{}, + Node: "", + }, + { + Namespace: "default", + Pod: "pod2", + IPv4: []string{ + "10.6.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + Labels: labels, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.1"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "default", + Labels: labels, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.2"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"pod3", + Namespace: "default", + Labels: labels, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.3"}, + }, + }, + }, + } + return initialObjects, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "", Name: policyName}} +} + +func mock_policyObjs_need_delete_endpoint() ([]client.Object, reconcile.Request) { + labels := map[string]string{"app": "nginx1"} + policyName := "policy1" + + initialObjects := []client.Object{ + &v1beta1.EgressPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: v1beta1.EgressPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.AppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels}, + }, + DestSubnet: nil, + }, + }, + &v1beta1.EgressEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s1", + Labels: map[string]string{ + v1beta1.LabelPolicyName: policyName, + }, + }, + Endpoints: []v1beta1.EgressEndpoint{ + { + Namespace: "default", + Pod: "pod1", + IPv4: []string{ + "10.6.0.1", + }, + IPv6: []string{}, + Node: "", + }, + { + Namespace: "default", + Pod: "pod2", + IPv4: []string{ + "10.6.0.2", + }, + IPv6: []string{}, + Node: "", + }, + }, + }, + } + return initialObjects, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "", Name: policyName}} +} + +func mock_endpointReconciler_Reconcile_Update_err(t *testing.T, r *endpointReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Update", errForMock) + patch2 := gomonkey.ApplyFuncReturn(needUpdateEndpoint, true) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_policyObjs_no_endpoint() ([]client.Object, reconcile.Request) { + labels1 := map[string]string{"app": "nginx1"} + policyName := "policy1" + initialObjects := []client.Object{ + &v1beta1.EgressPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: v1beta1.EgressPolicySpec{ + EgressGatewayName: "", + EgressIP: v1beta1.EgressIP{}, + AppliedTo: v1beta1.AppliedTo{ + PodSelector: &metav1.LabelSelector{MatchLabels: labels1}, + }, + DestSubnet: nil, + }, + }, + + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + Labels: labels1, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.1"}, + {IP: "10.6.0.2"}, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "default", + Labels: labels1, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.6.0.3"}, + {IP: "10.6.0.4"}, + }, + }, + }, + } + return initialObjects, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "", Name: policyName}} +} + +func mock_endpointReconciler_Reconcile_Create_err(t *testing.T, r *endpointReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Create", errForMock) + patch2 := gomonkey.ApplyFuncReturn(needUpdateEndpoint, false) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_endpointReconciler_Reconcile_Delete_err(t *testing.T, r *endpointReconciler, mgr manager.Manager, log logr.Logger) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(r.client, "Delete", errForMock) + patch2 := gomonkey.ApplyFuncReturn(needUpdateEndpoint, false) + return []gomonkey.Patches{*patch1, *patch2} +} + +func mock_listPodsByPolicy() *v1beta1.EgressPolicy { + return &v1beta1.EgressPolicy{ + Spec: 
v1beta1.EgressPolicySpec{ + AppliedTo: v1beta1.AppliedTo{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + }, + } +} + +func mock_listPodsByPolicy_LabelSelectorAsSelector_err(c client.Client) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(metav1.LabelSelectorAsSelector, nil, errForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_enqueuePod_List_err(c client.Client) []gomonkey.Patches { + patch1 := gomonkey.ApplyMethodReturn(c, "List", errForMock) + return []gomonkey.Patches{*patch1} +} + +func mock_enqueuePod_LabelSelectorAsSelector_err(c client.Client) []gomonkey.Patches { + patch1 := gomonkey.ApplyFuncReturn(metav1.LabelSelectorAsSelector, nil, errForMock) + return []gomonkey.Patches{*patch1} +} diff --git a/pkg/markallocator/allocator_test.go b/pkg/markallocator/allocator_test.go index c585c3e7b..4144544ad 100644 --- a/pkg/markallocator/allocator_test.go +++ b/pkg/markallocator/allocator_test.go @@ -1,16 +1,21 @@ // Copyright 2022 Authors of spidernet-io // SPDX-License-Identifier: Apache-2.0 -package markallocator_test +package markallocator import ( - "github.com/spidernet-io/egressgateway/pkg/markallocator" + "errors" + "math/big" "testing" + + "github.com/agiledragon/gomonkey/v2" + "github.com/cilium/ipam/service/allocator" + "github.com/stretchr/testify/assert" ) func TestAllocatorMarkRange(t *testing.T) { - Allocator, _ := markallocator.NewAllocatorMarkRange("0x26000000") + Allocator, _ := NewAllocatorMarkRange("0x26000000") _, _ = Allocator.AllocateNext() _ = Allocator.Has("0x26000000") _ = Allocator.Allocate("0x26000000") @@ -21,3 +26,360 @@ func TestAllocatorMarkRange(t *testing.T) { _ = Allocator.Allocate("0x23000000") _ = Allocator.Release("0x23000000") } + +var ErrForMock = errors.New("mock err") + +func Test_NewAllocatorMarkRange(t *testing.T) { + cases := map[string]struct { + patchFunc func() []gomonkey.Patches + expErr bool + }{ + "failed RangeSize": { + patchFunc: mock_NewAllocatorMarkRange_RangeSize, + expErr: true, + }, + + "failed bigForMark": { + patchFunc: mock_NewAllocatorMarkRange_bigForMark, + expErr: true, + }, + } + + mark := "0x13413" + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + patches := make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patches = tc.patchFunc() + + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + _, err := NewAllocatorMarkRange(mark) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_bigForMark(t *testing.T) { + patch := gomonkey.NewPatches() + patch.ApplyFuncReturn(Parse, uint64(0), ErrForMock) + defer patch.Reset() + + mark := "0x13413" + _, err := bigForMark(mark) + assert.Error(t, err) +} + +func Test_RangeSize(t *testing.T) { + cases := map[string]struct { + patchFunc func() []gomonkey.Patches + expErr bool + }{ + "failed Parse start": { + patchFunc: mock_RangeSize_Parse_start, + expErr: true, + }, + "failed Parse end": { + patchFunc: mock_RangeSize_Parse_end, + expErr: true, + }, + } + + mark := "0x13413" + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + patches := make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patches = tc.patchFunc() + + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + _, _, err := RangeSize(mark) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_Range_Allocate(t *testing.T) { + cases := map[string]struct { + patchFunc func(r 
*Range) []gomonkey.Patches + expErr bool + }{ + "failed Parse": { + patchFunc: mock_Range_Allocate_Parse, + expErr: true, + }, + "failed Allocate err": { + patchFunc: mock_Range_Allocate_Allocate_err, + expErr: true, + }, + "failed Allocate false": { + patchFunc: mock_Range_Allocate_Allocate_false, + expErr: true, + }, + "succeeded Allocate": {}, + } + + r := Range{start: uint64(0), end: uint64(512), base: big.NewInt(100), max: 200, alloc: allocator.NewAllocationMap(200, "")} + mark := "0x100" + // r, _ := NewAllocatorMarkRange(mark) + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + patches := make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patches = tc.patchFunc(&r) + + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + err := r.Allocate(mark) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_Range_AllocateNext(t *testing.T) { + cases := map[string]struct { + patchFunc func(r *Range) []gomonkey.Patches + expErr bool + }{ + "failed AllocateNext err": { + patchFunc: mock_Range_AllocateNext_AllocateNext_err, + expErr: true, + }, + "failed AllocateNext false": { + patchFunc: mock_Range_AllocateNext_AllocateNext_false, + expErr: true, + }, + } + + r := Range{start: uint64(0), end: uint64(512), base: big.NewInt(100), max: 200, alloc: allocator.NewAllocationMap(200, "")} + // mark := "0x100" + // r, _ := NewAllocatorMarkRange(mark) + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + patches := make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patches = tc.patchFunc(&r) + + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + _, err := r.AllocateNext() + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_Range_Release(t *testing.T) { + cases := map[string]struct { + patchFunc func(r *Range) []gomonkey.Patches + expErr bool + }{ + "failed Parse": { + patchFunc: mock__Range_Release_Parse, + expErr: true, + }, + "succeeded Release": {}, + } + + r := Range{start: uint64(0), end: uint64(512), base: big.NewInt(100), max: 200, alloc: allocator.NewAllocationMap(200, "")} + mark := "0x100" + // r, _ := NewAllocatorMarkRange(mark) + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + patches := make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patches = tc.patchFunc(&r) + + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + err := r.Release(mark) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func Test_Range_Has(t *testing.T) { + cases := map[string]struct { + patchFunc func(r *Range) []gomonkey.Patches + expOK bool + }{ + "failed Parse": { + patchFunc: mock__Range_Has_Parse, + expOK: true, + }, + "succeeded Parse": {}, + } + + r := Range{start: uint64(0), end: uint64(512), base: big.NewInt(100), max: 200, alloc: allocator.NewAllocationMap(200, "")} + mark := "0x100" + // r, _ := NewAllocatorMarkRange(mark) + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + patches := make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patches = tc.patchFunc(&r) + + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + b := r.Has(mark) + if tc.expOK { + assert.False(t, b) + } + }) + } +} + +func Test_Range_contains(t *testing.T) { + cases := map[string]struct { + patchFunc func(r *Range) []gomonkey.Patches + expOK bool + }{ + "succeeded contains": {}, + } + + r := Range{start: 
uint64(0), end: uint64(512), base: big.NewInt(100), max: 200, alloc: allocator.NewAllocationMap(200, "")} + mark := uint64(100) + // r, _ := NewAllocatorMarkRange(mark) + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + patches := make([]gomonkey.Patches, 0) + if tc.patchFunc != nil { + patches = tc.patchFunc(&r) + + defer func() { + for _, p := range patches { + p.Reset() + } + }() + } + + b, _ := r.contains(mark) + if tc.expOK { + assert.False(t, b) + } + }) + } +} + +func mock_NewAllocatorMarkRange_RangeSize() []gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(RangeSize, uint64(0), uint64(0), ErrForMock) + return []gomonkey.Patches{*patch} +} + +func mock_NewAllocatorMarkRange_bigForMark() []gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(bigForMark, nil, ErrForMock) + return []gomonkey.Patches{*patch} +} + +func mock_RangeSize_Parse_start() []gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(Parse, uint64(0), ErrForMock) + return []gomonkey.Patches{*patch} +} + +func mock_RangeSize_Parse_end() []gomonkey.Patches { + patch := gomonkey.ApplyFuncSeq(Parse, []gomonkey.OutputCell{ + {Values: gomonkey.Params{uint64(1234567), nil}, Times: 1}, + {Values: gomonkey.Params{uint64(1234567), ErrForMock}, Times: 1}, + }) + return []gomonkey.Patches{*patch} +} + +func mock_Range_Allocate_Parse(r *Range) []gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(Parse, uint64(0), ErrForMock) + return []gomonkey.Patches{*patch} +} + +func mock_Range_Allocate_Allocate_err(r *Range) []gomonkey.Patches { + patch := gomonkey.ApplyPrivateMethod(r.alloc, "Allocate", func(_ allocator.Interface) (bool, error) { + return false, ErrForMock + }) + return []gomonkey.Patches{*patch} +} + +func mock_Range_Allocate_Allocate_false(r *Range) []gomonkey.Patches { + patch := gomonkey.ApplyPrivateMethod(r.alloc, "Allocate", func(_ allocator.Interface) (bool, error) { + return false, nil + }) + return []gomonkey.Patches{*patch} +} + +func mock_Range_AllocateNext_AllocateNext_err(r *Range) []gomonkey.Patches { + patch := gomonkey.ApplyPrivateMethod(r.alloc, "AllocateNext", func(_ allocator.Interface) (int, bool, error) { + return 0, false, ErrForMock + }) + return []gomonkey.Patches{*patch} +} + +func mock_Range_AllocateNext_AllocateNext_false(r *Range) []gomonkey.Patches { + patch := gomonkey.ApplyPrivateMethod(r.alloc, "AllocateNext", func(_ allocator.Interface) (int, bool, error) { + return 0, false, nil + }) + return []gomonkey.Patches{*patch} +} + +func mock__Range_Release_Parse(r *Range) []gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(Parse, uint64(0), ErrForMock) + return []gomonkey.Patches{*patch} +} + +func mock__Range_Has_Parse(r *Range) []gomonkey.Patches { + patch := gomonkey.ApplyFuncReturn(Parse, uint64(0), ErrForMock) + return []gomonkey.Patches{*patch} +}
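+ +// A minimal sketch (illustrative only, not part of this change) of the gomonkey pattern the tests above rely on: stub a package-level function, exercise the caller, and Reset so later tests see the real implementation again, e.g. +// +// patch := gomonkey.ApplyFuncReturn(Parse, uint64(0), ErrForMock) +// defer patch.Reset() +// if _, err := bigForMark("0x26000000"); err == nil { +// t.Fatal("expected bigForMark to fail while Parse is stubbed") +// } +// +// ApplyFuncSeq (as in mock_RangeSize_Parse_end) generalizes this with per-call OutputCell{Values, Times}; omitting Reset would leak the stub into every later test in the package.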