diff --git a/.golangci.yaml b/.golangci.yaml index ad93cf67f1..3d80038e42 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -135,6 +135,7 @@ linters: allow-packages: - autorecovery - apiserver + - c2cc - certchains - cmd - components diff --git a/go.mod b/go.mod index d4a46141c7..8d9a2d94f1 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/openshift/build-machinery-go v0.0.0-20251023084048-5d77c1a5e5af github.com/openshift/client-go v0.0.0-20260306160707-3935d929fc7d github.com/openshift/library-go v0.0.0-20260303171201-5d9eb6295ff6 + github.com/ovn-kubernetes/libovsdb v0.8.2-0.20260302130604-c07ce22366ac github.com/pkg/errors v0.9.1 // indirect github.com/spf13/cobra v1.10.2 github.com/spf13/pflag v1.0.10 @@ -21,10 +22,12 @@ require ( golang.org/x/sys v0.42.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/kube-openapi v0.0.0-20260304202019-5b3e3fdb0acf + sigs.k8s.io/knftables v0.0.20 sigs.k8s.io/yaml v1.6.0 ) require ( + github.com/cenkalti/backoff/v4 v4.3.0 github.com/coreos/go-systemd/v22 v22.7.0 github.com/evanphx/json-patch v4.12.0+incompatible github.com/fsnotify/fsnotify v1.9.0 @@ -68,6 +71,8 @@ require ( github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/cenkalti/hub v1.0.2 // indirect + github.com/cenkalti/rpc2 v1.0.5 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/container-storage-interface/spec v1.9.0 // indirect github.com/containerd/containerd/api v1.9.0 // indirect @@ -86,6 +91,7 @@ require ( github.com/felixge/fgprof v0.9.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.10 // indirect github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-ldap/ldap/v3 v3.4.11 // indirect @@ -106,6 +112,9 @@ require ( 
github.com/go-openapi/swag/stringutils v0.25.5 // indirect github.com/go-openapi/swag/typeutils v0.25.5 // indirect github.com/go-openapi/swag/yamlutils v0.25.5 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.28.0 // indirect github.com/go-stack/stack v1.8.1 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -121,6 +130,7 @@ require ( github.com/jonboulle/clockwork v0.5.0 // indirect github.com/karrick/godirwalk v1.17.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/libopenstorage/openstorage v1.0.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect diff --git a/go.sum b/go.sum index 56e2129632..0154d4e35c 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,14 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/hub v1.0.2 h1:Nqv9TNaA9boeO2wQFW8o87BY3zKthtnzXmWGmJqhAV8= +github.com/cenkalti/hub v1.0.2/go.mod h1:8LAFAZcCasb83vfxatMUnZHRoQcffho2ELpHb+kaTJU= +github.com/cenkalti/rpc2 v1.0.5 h1:T6l4SS3ja3eaJfRyZrn7Oco/PSx/pr3YK5cjCgLVLTk= 
+github.com/cenkalti/rpc2 v1.0.5/go.mod h1:2yfU5b86vOr16+iY1jN3MvT6Kxc9Nf8j5iZWwUf7iaw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= @@ -107,6 +113,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= +github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo= github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= @@ -160,6 +168,14 @@ github.com/go-openapi/testify/enable/yaml/v2 v2.4.0 h1:7SgOMTvJkM8yWrQlU8Jm18VeD github.com/go-openapi/testify/enable/yaml/v2 v2.4.0/go.mod h1:14iV8jyyQlinc9StD7w1xVPW3CO3q1Gj04Jy//Kw4VM= github.com/go-openapi/testify/v2 v2.4.0 h1:8nsPrHVCWkQ4p8h1EsRVymA2XABB4OT40gcvAu+voFM= github.com/go-openapi/testify/v2 v2.4.0/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= 
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688= +github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= @@ -244,6 +260,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/libopenstorage/openstorage v1.0.0 h1:GLPam7/0mpdP8ZZtKjbfcXJBTIA/T1O6CBErVEFEyIM= github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -315,6 +333,8 @@ github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251120221002-696928a6a0d7/go.mod github.com/openshift/route-controller-manager v0.0.0-20260211095309-624742d93f3a h1:jBmugEVHpoRmyNBFb7ZBLeurE5+snu9NXS6bf57iiqA= github.com/openshift/route-controller-manager v0.0.0-20260211095309-624742d93f3a/go.mod h1:KNxrLlGIh4j2iUAOax5Y9f5rN3vIo8w9QTGKHEQlvFo= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod 
h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= +github.com/ovn-kubernetes/libovsdb v0.8.2-0.20260302130604-c07ce22366ac h1:D7Ex9/u5HMz+xvqel1RCCO1AxVG7XRAx9AcP02/nyzk= +github.com/ovn-kubernetes/libovsdb v0.8.2-0.20260302130604-c07ce22366ac/go.mod h1:x2keWyG0K1WmZeZLRh+z4fWwcqp99Yu9/HAiMucj5D0= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -530,6 +550,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0 h1:hSfpvjjTQXQY2 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/knftables v0.0.20 h1:eU2NWpgcJ/wgb4Fy0cX3klK6nDjERvZRdYgkORLU0Tc= +sigs.k8s.io/knftables v0.0.20/go.mod h1:f/5ZLKYEUPUhVjUCg6l80ACdL7CIIyeL0DxfgojGRTk= sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= diff --git a/pkg/cmd/run.go b/pkg/cmd/run.go index bd32bb8782..2e7512ed5e 100644 --- a/pkg/cmd/run.go +++ b/pkg/cmd/run.go @@ -16,6 +16,7 @@ import ( "github.com/openshift/microshift/pkg/admin/prerun" "github.com/openshift/microshift/pkg/config" "github.com/openshift/microshift/pkg/controllers" + "github.com/openshift/microshift/pkg/controllers/c2cc" "github.com/openshift/microshift/pkg/gdp" "github.com/openshift/microshift/pkg/kustomize" "github.com/openshift/microshift/pkg/loadbalancerservice" @@ -237,6 +238,7 @@ func RunMicroshift(cfg *config.Config) error { 
util.Must(m.AddService(controllers.NewTelemetryManager(cfg))) util.Must(m.AddService(controllers.NewHostsWatcherManager(cfg))) util.Must(m.AddService(gdp.NewGenericDevicePlugin(cfg))) + util.Must(m.AddService(c2cc.NewC2CCRouteManager(cfg))) // Storing and clearing the env, so other components don't send the READY=1 until MicroShift is fully ready notifySocket := os.Getenv("NOTIFY_SOCKET") diff --git a/pkg/config/c2cc.go b/pkg/config/c2cc.go index cf758251e6..11f7ad2ef8 100644 --- a/pkg/config/c2cc.go +++ b/pkg/config/c2cc.go @@ -49,6 +49,15 @@ func (c *C2CC) IsEnabled() bool { return len(c.RemoteClusters) > 0 } +func (c *C2CC) AllRemoteCIDRs() []string { + cidrs := make([]string, 0, len(c.RemoteClusters)*4) + for _, rc := range c.RemoteClusters { + cidrs = append(cidrs, rc.ClusterNetwork...) + cidrs = append(cidrs, rc.ServiceNetwork...) + } + return cidrs +} + func (rc *RemoteCluster) isEmpty() bool { return rc.NextHop == "" && len(rc.ClusterNetwork) == 0 && len(rc.ServiceNetwork) == 0 && rc.Domain == "" } diff --git a/pkg/controllers/c2cc/annotation.go b/pkg/controllers/c2cc/annotation.go new file mode 100644 index 0000000000..a2b5008a05 --- /dev/null +++ b/pkg/controllers/c2cc/annotation.go @@ -0,0 +1,208 @@ +package c2cc + +import ( + "context" + "encoding/json" + "fmt" + "sort" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" +) + +const ( + ovnNodeDontSNATSubnets = "k8s.ovn.org/node-ingress-snat-exclude-subnets" + c2ccSNATTrackingAnnotation = "microshift.io/c2cc-snat-subnets" +) + +type annotationManager struct { + kubeClient kubernetes.Interface + nodeName string + desiredCIDRs []string +} + +func newAnnotationManager(kubeClient kubernetes.Interface, nodeName string, remoteCIDRs []string) *annotationManager { + sorted := make([]string, len(remoteCIDRs)) + copy(sorted, remoteCIDRs) + sort.Strings(sorted) 
+
+	return &annotationManager{
+		kubeClient:   kubeClient,
+		nodeName:     nodeName,
+		desiredCIDRs: sorted,
+	}
+}
+
+// parseCIDRAnnotation decodes a JSON string-array annotation value into a
+// slice of CIDR strings. An empty value yields nil. Unmarshal errors are
+// deliberately swallowed (nil is returned), so a malformed annotation is
+// treated the same as an absent one.
+func parseCIDRAnnotation(value string) []string {
+	if value == "" {
+		return nil
+	}
+	var cidrs []string
+	if err := json.Unmarshal([]byte(value), &cidrs); err != nil {
+		return nil
+	}
+	return cidrs
+}
+
+// cidrSetContainsAll reports whether every element of subset is present in
+// superset, using exact string comparison (set semantics; duplicates and
+// order are irrelevant). An empty subset always returns true.
+func cidrSetContainsAll(superset, subset []string) bool {
+	set := make(map[string]bool, len(superset))
+	for _, c := range superset {
+		set[c] = true
+	}
+	for _, c := range subset {
+		if !set[c] {
+			return false
+		}
+	}
+	return true
+}
+
+// reconcile ensures the node's OVN SNAT-exclusion annotation contains the
+// desired C2CC CIDRs, merging with (not clobbering) CIDRs written by other
+// components. What C2CC wrote last time is recorded in a private tracking
+// annotation so it can be replaced or removed later.
+func (a *annotationManager) reconcile(ctx context.Context) error {
+	node, err := a.kubeClient.CoreV1().Nodes().Get(ctx, a.nodeName, metav1.GetOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to get node %q: %w", a.nodeName, err)
+	}
+
+	existing := parseCIDRAnnotation(node.Annotations[ovnNodeDontSNATSubnets])
+	previous := parseCIDRAnnotation(node.Annotations[c2ccSNATTrackingAnnotation])
+
+	// Target = (existing - previous) + desired
+	// This replaces only the CIDRs C2CC previously wrote, preserving anything added by other components.
+ foreignCIDRs := make(map[string]bool, len(existing)) + for _, c := range existing { + foreignCIDRs[c] = true + } + for _, c := range previous { + delete(foreignCIDRs, c) + } + + targetSet := make(map[string]bool, len(foreignCIDRs)+len(a.desiredCIDRs)) + for c := range foreignCIDRs { + targetSet[c] = true + } + for _, c := range a.desiredCIDRs { + targetSet[c] = true + } + + target := make([]string, 0, len(targetSet)) + for c := range targetSet { + target = append(target, c) + } + sort.Strings(target) + + targetJSON, _ := json.Marshal(target) + desiredJSON, _ := json.Marshal(a.desiredCIDRs) + + if node.Annotations[ovnNodeDontSNATSubnets] == string(targetJSON) && + node.Annotations[c2ccSNATTrackingAnnotation] == string(desiredJSON) { + return nil + } + + patch := fmt.Sprintf(`{"metadata":{"annotations":{%q:%q,%q:%q}}}`, + ovnNodeDontSNATSubnets, string(targetJSON), + c2ccSNATTrackingAnnotation, string(desiredJSON)) + _, err = a.kubeClient.CoreV1().Nodes().Patch(ctx, a.nodeName, + types.MergePatchType, []byte(patch), metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("failed to patch node annotation: %w", err) + } + klog.V(2).Infof("Updated node annotation %s = %s (tracking: %s)", ovnNodeDontSNATSubnets, string(targetJSON), string(desiredJSON)) + return nil +} + +func (a *annotationManager) cleanup(ctx context.Context) error { + node, err := a.kubeClient.CoreV1().Nodes().Get(ctx, a.nodeName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get node %q for cleanup: %w", a.nodeName, err) + } + + tracked := parseCIDRAnnotation(node.Annotations[c2ccSNATTrackingAnnotation]) + if len(tracked) == 0 { + return nil + } + + existing := parseCIDRAnnotation(node.Annotations[ovnNodeDontSNATSubnets]) + trackedSet := make(map[string]bool, len(tracked)) + for _, c := range tracked { + trackedSet[c] = true + } + + var remaining []string + for _, c := range existing { + if !trackedSet[c] { + remaining = append(remaining, c) + } + } + + var snatValue 
string + if len(remaining) == 0 { + snatValue = "null" + } else { + sort.Strings(remaining) + data, _ := json.Marshal(remaining) + snatValue = fmt.Sprintf("%q", string(data)) + } + + patch := fmt.Sprintf(`{"metadata":{"annotations":{%s:%s,%q:null}}}`, + fmt.Sprintf("%q", ovnNodeDontSNATSubnets), snatValue, + c2ccSNATTrackingAnnotation) + _, err = a.kubeClient.CoreV1().Nodes().Patch(ctx, a.nodeName, + types.MergePatchType, []byte(patch), metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("failed to cleanup node annotation: %w", err) + } + klog.V(2).Infof("Cleaned up node annotation %s (removed %d C2CC CIDRs, %d remaining)", ovnNodeDontSNATSubnets, len(tracked), len(remaining)) + return nil +} + +func (a *annotationManager) subscribe(ctx context.Context, reconcileCh chan<- string) { + go func() { + for { + watcher, err := a.kubeClient.CoreV1().Nodes().Watch(ctx, metav1.ListOptions{ + FieldSelector: "metadata.name=" + a.nodeName, + }) + if err != nil { + klog.Warningf("Could not watch node for annotation changes: %v", err) + select { + case <-ctx.Done(): + return + case <-time.After(10 * time.Second): + continue + } + } + for event := range watcher.ResultChan() { + if event.Type != watch.Modified { + continue + } + node, ok := event.Object.(*corev1.Node) + if !ok { + continue + } + current := parseCIDRAnnotation(node.Annotations[ovnNodeDontSNATSubnets]) + if cidrSetContainsAll(current, a.desiredCIDRs) { + continue + } + select { + case reconcileCh <- "node-annotation-changed": + default: + } + } + watcher.Stop() + if ctx.Err() != nil { + return + } + klog.V(4).Infof("Node watch closed unexpectedly, reconnecting") + select { + case <-ctx.Done(): + return + case <-time.After(1 * time.Second): + } + } + }() + klog.V(2).Infof("Subscribed to node annotation changes for %s", a.nodeName) +} diff --git a/pkg/controllers/c2cc/annotation_test.go b/pkg/controllers/c2cc/annotation_test.go new file mode 100644 index 0000000000..a434715203 --- /dev/null +++ 
b/pkg/controllers/c2cc/annotation_test.go @@ -0,0 +1,101 @@ +package c2cc + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewAnnotationManager_SortsDesiredCIDRs(t *testing.T) { + tests := []struct { + name string + cidrs []string + expected []string + }{ + { + name: "single cidr", + cidrs: []string{"10.45.0.0/16"}, + expected: []string{"10.45.0.0/16"}, + }, + { + name: "multiple cidrs already sorted", + cidrs: []string{"10.45.0.0/16", "10.46.0.0/16"}, + expected: []string{"10.45.0.0/16", "10.46.0.0/16"}, + }, + { + name: "multiple cidrs unsorted", + cidrs: []string{"172.31.0.0/16", "10.45.0.0/16", "10.46.0.0/16"}, + expected: []string{"10.45.0.0/16", "10.46.0.0/16", "172.31.0.0/16"}, + }, + { + name: "empty cidrs", + cidrs: []string{}, + expected: []string{}, + }, + { + name: "nil cidrs", + cidrs: nil, + expected: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mgr := newAnnotationManager(nil, "test-node", tt.cidrs) + assert.Equal(t, tt.expected, mgr.desiredCIDRs) + assert.Equal(t, "test-node", mgr.nodeName) + }) + } +} + +func TestNewAnnotationManager_DoesNotMutateInput(t *testing.T) { + input := []string{"172.31.0.0/16", "10.45.0.0/16"} + original := make([]string, len(input)) + copy(original, input) + + newAnnotationManager(nil, "test-node", input) + + assert.Equal(t, original, input, "input slice should not be modified") +} + +func TestParseCIDRAnnotation(t *testing.T) { + tests := []struct { + name string + value string + expected []string + }{ + {name: "empty string", value: "", expected: nil}, + {name: "valid json", value: `["10.0.0.0/16","172.16.0.0/12"]`, expected: []string{"10.0.0.0/16", "172.16.0.0/12"}}, + {name: "invalid json", value: "not-json", expected: nil}, + {name: "empty array", value: "[]", expected: []string{}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseCIDRAnnotation(tt.value) + assert.Equal(t, tt.expected, result) + }) 
+ } +} + +func TestCidrSetContainsAll(t *testing.T) { + tests := []struct { + name string + superset []string + subset []string + expected bool + }{ + {name: "empty subset", superset: []string{"10.0.0.0/16"}, subset: nil, expected: true}, + {name: "exact match", superset: []string{"10.0.0.0/16"}, subset: []string{"10.0.0.0/16"}, expected: true}, + {name: "superset contains all", superset: []string{"10.0.0.0/16", "172.16.0.0/12"}, subset: []string{"10.0.0.0/16"}, expected: true}, + {name: "missing element", superset: []string{"10.0.0.0/16"}, subset: []string{"172.16.0.0/12"}, expected: false}, + {name: "empty superset nonempty subset", superset: nil, subset: []string{"10.0.0.0/16"}, expected: false}, + {name: "both empty", superset: nil, subset: nil, expected: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, cidrSetContainsAll(tt.superset, tt.subset)) + }) + } +} diff --git a/pkg/controllers/c2cc/controller.go b/pkg/controllers/c2cc/controller.go new file mode 100644 index 0000000000..32f4726d61 --- /dev/null +++ b/pkg/controllers/c2cc/controller.go @@ -0,0 +1,268 @@ +package c2cc + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/openshift/microshift/pkg/config" + "github.com/ovn-kubernetes/libovsdb/client" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog/v2" +) + +const ( + reconcileInterval = 10 * time.Second +) + +type C2CCRouteManager struct { + cfg *config.Config + nodeName string + kubeconfig string + + kubeClient kubernetes.Interface + ovn *ovnRouteManager + annotation *annotationManager + nftMgr *nftablesManager + routes *linuxRouteManager + svcRoutes *serviceRouteManager + netpol *networkPolicyManager +} + +func NewC2CCRouteManager(cfg *config.Config) *C2CCRouteManager { + return &C2CCRouteManager{ + cfg: cfg, + nodeName: cfg.Node.HostnameOverride, + kubeconfig: cfg.KubeConfigPath(config.KubeAdmin), + } +} + +func (c *C2CCRouteManager) Name() 
string { return "c2cc-route-manager" }
+func (c *C2CCRouteManager) Dependencies() []string { return []string{"kubelet"} }
+
+// Run is the service entry point: it wires up all C2CC subsystems, then
+// loops reconciling them on a timer and on subsystem change events until
+// ctx is cancelled. When C2CC is disabled it performs best-effort cleanup
+// of leftover state and returns.
+func (c *C2CCRouteManager) Run(ctx context.Context, ready chan<- struct{}, stopped chan<- struct{}) error {
+	defer close(stopped)
+
+	if !c.cfg.C2CC.IsEnabled() {
+		klog.Infof("C2CC is disabled - attempting best effort cleanup")
+		close(ready)
+		closeCleanup := c.initForCleanup(ctx)
+		defer closeCleanup()
+		c.cleanupAll(ctx)
+		// NOTE(review): returns nil unless ctx is already cancelled — confirm
+		// the service manager treats a nil return as a clean stop.
+		return ctx.Err()
+	}
+
+	klog.Infof("C2CC is enabled with %d remote cluster(s)", len(c.cfg.C2CC.RemoteClusters))
+
+	// Declaring ready even before init because many of the components it tries to communicate with are not up yet
+	// and excessive waiting before readiness can cause them to never become ready resulting in MicroShift restart.
+	// ready is closed exactly once here; the error paths below must NOT close
+	// it again — closing an already-closed channel panics.
+	close(ready)
+
+	if err := c.initKubeClient(); err != nil {
+		return fmt.Errorf("failed to create kube client: %w", err)
+	}
+
+	nbClient, err := connectOVNNB(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to connect OVN NB: %w", err)
+	}
+	defer nbClient.Close()
+
+	if err := c.initSubsystems(nbClient); err != nil {
+		return fmt.Errorf("failed to init subsystems: %w", err)
+	}
+
+	// Buffered so event sources can signal without blocking; bursts are
+	// coalesced in the drain loop below.
+	reconcileCh := make(chan string, 10)
+
+	c.ovn.subscribe(ctx, reconcileCh)
+
+	if routeDone, err := c.routes.subscribe(reconcileCh, "linux-route-change"); err != nil {
+		klog.Warningf("Could not subscribe to route events for table %d: %v", c2ccRouteTable, err)
+	} else {
+		defer close(routeDone)
+	}
+
+	if svcRouteDone, err := c.svcRoutes.subscribe(reconcileCh, "service-route-change"); err != nil {
+		klog.Warningf("Could not subscribe to route events for table %d: %v", c2ccSvcRouteTable, err)
+	} else {
+		defer close(svcRouteDone)
+	}
+
+	if nftClose, err := c.nftMgr.subscribe(ctx, reconcileCh); err != nil {
+		klog.Warningf("Could not subscribe to nftables events: %v", err)
+	} else {
+		defer nftClose()
+	}
+
+	c.annotation.subscribe(ctx, reconcileCh)
+
	c.fullReconcile(ctx)
+
+	ticker := time.NewTicker(reconcileInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			klog.Infof("Shutting down, routes preserved")
+			return ctx.Err()
+		case <-ticker.C:
+			klog.V(4).Infof("Periodic resync")
+			c.fullReconcile(ctx)
+		case reason := <-reconcileCh:
+			// Drain the channel to debounce reconcile events and avoid queueing - each reconcile performs setup of all subsystems,
+			// so it's not necessary to reconcile several times in a row.
+			coalesced := 0
+			for {
+				select {
+				case <-reconcileCh:
+					coalesced++
+				default:
+					goto drained
+				}
+			}
+		drained:
+			if coalesced > 0 {
+				klog.V(2).Infof("Event-triggered reconcile: %s (+%d coalesced)", reason, coalesced)
+			} else {
+				klog.V(2).Infof("Event-triggered reconcile: %s", reason)
+			}
+			c.fullReconcile(ctx)
+		}
+	}
+}
+
+// initKubeClient builds a kubernetes client from the configured kubeconfig
+// path and stores it on the manager.
+func (c *C2CCRouteManager) initKubeClient() error {
+	restCfg, err := clientcmd.BuildConfigFromFlags("", c.kubeconfig)
+	if err != nil {
+		return fmt.Errorf("failed to build kubeconfig: %w", err)
+	}
+	kClient, err := kubernetes.NewForConfig(restCfg)
+	if err != nil {
+		return fmt.Errorf("failed to create kubernetes client: %w", err)
+	}
+	c.kubeClient = kClient
+	return nil
+}
+
+// initSubsystems constructs every C2CC subsystem manager from the resolved
+// remote-cluster configuration.
+func (c *C2CCRouteManager) initSubsystems(nbClient client.Client) error {
+	c.ovn = newOVNRouteManager(nbClient, c.nodeName, c.cfg.C2CC.Resolved)
+	c.annotation = newAnnotationManager(c.kubeClient, c.nodeName, c.cfg.C2CC.AllRemoteCIDRs())
+	c.routes = newLinuxRouteManager(c.cfg)
+	c.svcRoutes = newServiceRouteManager(c.cfg)
+
+	remotePodCIDRs := make([]*net.IPNet, 0, len(c.cfg.C2CC.Resolved)*2)
+	allRemoteCIDRs := make([]*net.IPNet, 0, len(c.cfg.C2CC.Resolved)*4)
+	for _, rc := range c.cfg.C2CC.Resolved {
+		remotePodCIDRs = append(remotePodCIDRs, rc.ClusterNetwork...)
+		allRemoteCIDRs = append(allRemoteCIDRs, rc.ClusterNetwork...)
+		allRemoteCIDRs = append(allRemoteCIDRs, rc.ServiceNetwork...)
+ } + + nftMgr, err := newNftablesManager(allRemoteCIDRs) + if err != nil { + return fmt.Errorf("failed to init nftables manager: %w", err) + } + c.nftMgr = nftMgr + + c.netpol = newNetworkPolicyManager(c.kubeClient, remotePodCIDRs) + + return nil +} + +func (c *C2CCRouteManager) initForCleanup(ctx context.Context) func() { + if err := c.initKubeClient(); err != nil { + klog.Warningf("Could not init kube client for cleanup, Kubernetes C2CC state will not be removed: %v", err) + } + + var closers []func() + + cleanupCtx, cancel := context.WithTimeout(ctx, connectTimeout) + defer cancel() + if nbClient, err := connectOVNNB(cleanupCtx); err == nil { + c.ovn = newOVNRouteManager(nbClient, c.nodeName, nil) + closers = append(closers, func() { nbClient.Close() }) + } else { + klog.Warningf("Could not connect to OVN NB for cleanup, OVN routes will not be removed: %v", err) + } + + c.routes = newLinuxRouteManager(c.cfg) + c.svcRoutes = newServiceRouteManager(c.cfg) + + if nftMgr, err := newNftablesManager(nil); err == nil { + c.nftMgr = nftMgr + } else { + klog.Warningf("Could not init nftables manager for cleanup: %v", err) + } + + if c.kubeClient != nil { + c.annotation = newAnnotationManager(c.kubeClient, c.nodeName, nil) + c.netpol = newNetworkPolicyManager(c.kubeClient, nil) + } + + return func() { + for _, fn := range closers { + fn() + } + } +} + +func (c *C2CCRouteManager) fullReconcile(ctx context.Context) { + subsystems := []struct { + name string + fn func(context.Context) error + }{ + {"ovn-routes", c.ovn.reconcile}, + {"node-annotation", c.annotation.reconcile}, + {"linux-routes", c.routes.reconcile}, + {"service-routes", c.svcRoutes.reconcile}, + {"nftables", c.nftMgr.reconcile}, + {"network-policy", c.netpol.reconcile}, + } + for _, s := range subsystems { + if err := s.fn(ctx); err != nil { + klog.Errorf("Reconcile %s failed: %v", s.name, err) + } + } +} + +func (c *C2CCRouteManager) cleanupAll(ctx context.Context) { + klog.V(2).Infof("Cleaning up any 
leftover C2CC state") + + type cleanable struct { + name string + fn func(context.Context) error + } + + var cleanups []cleanable + + if c.ovn != nil { + cleanups = append(cleanups, cleanable{"ovn-routes", c.ovn.cleanup}) + } + if c.annotation != nil { + cleanups = append(cleanups, cleanable{"node-annotation", c.annotation.cleanup}) + } + if c.routes != nil { + cleanups = append(cleanups, cleanable{"linux-routes", c.routes.cleanup}) + } + if c.svcRoutes != nil { + cleanups = append(cleanups, cleanable{"service-routes", c.svcRoutes.cleanup}) + } + if c.nftMgr != nil { + cleanups = append(cleanups, cleanable{"nftables", c.nftMgr.cleanup}) + } + if c.netpol != nil { + cleanups = append(cleanups, cleanable{"network-policy", c.netpol.cleanup}) + } + + for _, cl := range cleanups { + if err := cl.fn(ctx); err != nil { + klog.Errorf("Cleanup %s failed: %v", cl.name, err) + } + } +} diff --git a/pkg/controllers/c2cc/helpers_test.go b/pkg/controllers/c2cc/helpers_test.go new file mode 100644 index 0000000000..0f814de22b --- /dev/null +++ b/pkg/controllers/c2cc/helpers_test.go @@ -0,0 +1,52 @@ +package c2cc + +import ( + "net" + "testing" + + "github.com/openshift/microshift/pkg/config" + "github.com/stretchr/testify/require" +) + +type testRemoteConfig struct { + nextHop string + clusterNetwork []string + serviceNetwork []string +} + +func testRemote(nextHop string, clusterNetwork, serviceNetwork []string) testRemoteConfig { + return testRemoteConfig{ + nextHop: nextHop, + clusterNetwork: clusterNetwork, + serviceNetwork: serviceNetwork, + } +} + +func testConfigWithRemotes(t *testing.T, remotes ...testRemoteConfig) *config.Config { + t.Helper() + + cfg := &config.Config{} + cfg.Node.NodeIP = "192.168.1.1" + cfg.Network.ServiceNetwork = []string{"10.43.0.0/16"} + + for _, r := range remotes { + resolved := config.ResolvedRemoteCluster{ + NextHop: net.ParseIP(r.nextHop), + } + require.NotNil(t, resolved.NextHop, "invalid nextHop: %s", r.nextHop) + + for _, cn := range 
r.clusterNetwork { + _, ipNet, err := net.ParseCIDR(cn) + require.NoError(t, err) + resolved.ClusterNetwork = append(resolved.ClusterNetwork, ipNet) + } + for _, sn := range r.serviceNetwork { + _, ipNet, err := net.ParseCIDR(sn) + require.NoError(t, err) + resolved.ServiceNetwork = append(resolved.ServiceNetwork, ipNet) + } + cfg.C2CC.Resolved = append(cfg.C2CC.Resolved, resolved) + } + + return cfg +} diff --git a/pkg/controllers/c2cc/nbdb.go b/pkg/controllers/c2cc/nbdb.go new file mode 100644 index 0000000000..77ceedfbea --- /dev/null +++ b/pkg/controllers/c2cc/nbdb.go @@ -0,0 +1,104 @@ +package c2cc + +import ( + "context" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/openshift/microshift/pkg/util" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "k8s.io/klog/v2" +) + +const ( + ovnNBSocketPath = "/var/run/ovn/ovnnb_db.sock" + ovnNBEndpoint = "unix:" + ovnNBSocketPath + ovnNBDatabase = "OVN_Northbound" + + socketPollInterval = 5 * time.Second + connectTimeout = 30 * time.Second +) + +// LogicalRouter is a minimal OVN NB model for the Logical_Router table. +type LogicalRouter struct { + UUID string `ovsdb:"_uuid"` + Name string `ovsdb:"name"` + StaticRoutes []string `ovsdb:"static_routes"` +} + +// LogicalRouterStaticRoute is a minimal OVN NB model for the Logical_Router_Static_Route table. 
+type LogicalRouterStaticRoute struct { + UUID string `ovsdb:"_uuid"` + IPPrefix string `ovsdb:"ip_prefix"` + Nexthop string `ovsdb:"nexthop"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Policy *string `ovsdb:"policy"` +} + +func nbdbModel() (model.ClientDBModel, error) { + dbModel, err := model.NewClientDBModel(ovnNBDatabase, map[string]model.Model{ + "Logical_Router": &LogicalRouter{}, + "Logical_Router_Static_Route": &LogicalRouterStaticRoute{}, + }) + if err != nil { + return dbModel, err + } + dbModel.SetIndexes(map[string][]model.ClientIndex{ + "Logical_Router": {{Columns: []model.ColumnKey{{Column: "name"}}}}, + }) + return dbModel, nil +} + +func waitForOVNSocket(ctx context.Context) error { + for { + exists, err := util.PathExists(ovnNBSocketPath) + if err != nil { + return fmt.Errorf("failed to stat OVN NB socket %s: %w", ovnNBSocketPath, err) + } + if exists { + return nil + } + klog.V(2).Infof("Waiting for OVN NB socket at %s", ovnNBSocketPath) + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(socketPollInterval): + } + } +} + +func connectOVNNB(ctx context.Context) (client.Client, error) { //nolint:ireturn + if err := waitForOVNSocket(ctx); err != nil { + return nil, fmt.Errorf("failed to wait for OVN NB socket: %w", err) + } + + dbModel, err := nbdbModel() + if err != nil { + return nil, fmt.Errorf("failed to build OVN NB database model: %w", err) + } + + nbClient, err := client.NewOVSDBClient( + dbModel, + client.WithEndpoint(ovnNBEndpoint), + client.WithReconnect(connectTimeout, backoff.NewExponentialBackOff()), + ) + if err != nil { + return nil, fmt.Errorf("failed to create OVN NB client: %w", err) + } + + if err := nbClient.Connect(ctx); err != nil { + nbClient.Close() + return nil, fmt.Errorf("failed to connect to OVN NB: %w", err) + } + + _, err = nbClient.MonitorAll(ctx) + if err != nil { + nbClient.Close() + return nil, fmt.Errorf("failed to set up OVN NB monitor: %w", err) + } + + klog.Infof("Connected to 
OVN NB database at %s", ovnNBEndpoint) + return nbClient, nil +} diff --git a/pkg/controllers/c2cc/nbdb_test.go b/pkg/controllers/c2cc/nbdb_test.go new file mode 100644 index 0000000000..ec7474949e --- /dev/null +++ b/pkg/controllers/c2cc/nbdb_test.go @@ -0,0 +1,41 @@ +package c2cc + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNBDBModel(t *testing.T) { + dbModel, err := nbdbModel() + require.NoError(t, err) + assert.Equal(t, ovnNBDatabase, dbModel.Name()) +} + +func TestLogicalRouterStaticRoute_FieldTags(t *testing.T) { + route := LogicalRouterStaticRoute{ + UUID: "test-uuid", + IPPrefix: "10.45.0.0/16", + Nexthop: "192.168.1.1", + ExternalIDs: map[string]string{"k8s.ovn.org/owner-controller": "microshift-c2cc"}, + } + + assert.Equal(t, "test-uuid", route.UUID) + assert.Equal(t, "10.45.0.0/16", route.IPPrefix) + assert.Equal(t, "192.168.1.1", route.Nexthop) + assert.Equal(t, "microshift-c2cc", route.ExternalIDs["k8s.ovn.org/owner-controller"]) + assert.Nil(t, route.Policy) +} + +func TestLogicalRouter_FieldTags(t *testing.T) { + router := LogicalRouter{ + UUID: "router-uuid", + Name: "GR_test-node", + StaticRoutes: []string{"route-uuid-1", "route-uuid-2"}, + } + + assert.Equal(t, "router-uuid", router.UUID) + assert.Equal(t, "GR_test-node", router.Name) + assert.Len(t, router.StaticRoutes, 2) +} diff --git a/pkg/controllers/c2cc/networkpolicy.go b/pkg/controllers/c2cc/networkpolicy.go new file mode 100644 index 0000000000..79ceeb71fb --- /dev/null +++ b/pkg/controllers/c2cc/networkpolicy.go @@ -0,0 +1,98 @@ +package c2cc + +import ( + "context" + "fmt" + "net" + + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" +) + +const ( + c2ccNetworkPolicyName = "c2cc-allow-remote-pods" + c2ccNetworkPolicyNamespace = "default" + c2ccManagedByLabel = 
"app.kubernetes.io/managed-by" + c2ccManagedByValue = "microshift-c2cc" +) + +type networkPolicyManager struct { + kubeClient kubernetes.Interface + desired *networkingv1.NetworkPolicy +} + +func newNetworkPolicyManager(kubeClient kubernetes.Interface, remotePodCIDRs []*net.IPNet) *networkPolicyManager { + ingressPeers := make([]networkingv1.NetworkPolicyPeer, 0, len(remotePodCIDRs)) + for _, cidr := range remotePodCIDRs { + ingressPeers = append(ingressPeers, networkingv1.NetworkPolicyPeer{ + IPBlock: &networkingv1.IPBlock{ + CIDR: cidr.String(), + }, + }) + } + + policy := &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: c2ccNetworkPolicyName, + Namespace: c2ccNetworkPolicyNamespace, + Labels: map[string]string{ + c2ccManagedByLabel: c2ccManagedByValue, + }, + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {From: ingressPeers}, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyTypeIngress, + }, + }, + } + + return &networkPolicyManager{ + kubeClient: kubeClient, + desired: policy, + } +} + +func (m *networkPolicyManager) reconcile(ctx context.Context) error { + client := m.kubeClient.NetworkingV1().NetworkPolicies(c2ccNetworkPolicyNamespace) + + existing, err := client.Get(ctx, c2ccNetworkPolicyName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, m.desired, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create NetworkPolicy: %w", err) + } + klog.V(2).Infof("Created NetworkPolicy %s/%s", c2ccNetworkPolicyNamespace, c2ccNetworkPolicyName) + return nil + } + if err != nil { + return fmt.Errorf("failed to get NetworkPolicy: %w", err) + } + + toUpdate := m.desired.DeepCopy() + toUpdate.ResourceVersion = existing.ResourceVersion + _, err = client.Update(ctx, toUpdate, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update NetworkPolicy: %w", err) + } + return 
nil +} + +func (m *networkPolicyManager) cleanup(ctx context.Context) error { + err := m.kubeClient.NetworkingV1().NetworkPolicies(c2ccNetworkPolicyNamespace).Delete( + ctx, c2ccNetworkPolicyName, metav1.DeleteOptions{}) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return fmt.Errorf("failed to delete NetworkPolicy: %w", err) + } + klog.V(2).Infof("Deleted NetworkPolicy %s/%s", c2ccNetworkPolicyNamespace, c2ccNetworkPolicyName) + return nil +} diff --git a/pkg/controllers/c2cc/nftables.go b/pkg/controllers/c2cc/nftables.go new file mode 100644 index 0000000000..0a020f8fc0 --- /dev/null +++ b/pkg/controllers/c2cc/nftables.go @@ -0,0 +1,210 @@ +package c2cc + +import ( + "context" + "fmt" + "net" + "strings" + "syscall" + "time" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" + "k8s.io/klog/v2" + "sigs.k8s.io/knftables" +) + +const ( + nftTable = "ovn-kubernetes" + nftChain = "ovn-kube-pod-subnet-masq" + nftCommentPrefix = "c2cc-no-masq:" +) + +type nftablesManager struct { + nft knftables.Interface + desiredCIDRs map[string]string // cidr -> bypass rule expression +} + +func newNftablesManager(remoteCIDRs []*net.IPNet) (*nftablesManager, error) { + nft, err := knftables.New(knftables.InetFamily, nftTable) + if err != nil { + return nil, fmt.Errorf("failed to create knftables interface: %w", err) + } + + desired := make(map[string]string, len(remoteCIDRs)) + for _, cidr := range remoteCIDRs { + prefix := "ip" + if cidr.IP.To4() == nil { + prefix = "ip6" + } + desired[cidr.String()] = fmt.Sprintf("%s daddr %s return", prefix, cidr) + } + + return &nftablesManager{ + nft: nft, + desiredCIDRs: desired, + }, nil +} + +func nftCommentForCIDR(cidr string) string { + return nftCommentPrefix + cidr +} + +func cidrFromNFTComment(comment string) string { + if !strings.HasPrefix(comment, nftCommentPrefix) { + return "" + } + return strings.TrimPrefix(comment, nftCommentPrefix) +} + +func (m *nftablesManager) reconcile(ctx context.Context) 
error { + existing, err := m.nft.ListRules(ctx, nftChain) + if err != nil { + if knftables.IsNotFound(err) { + klog.V(4).Infof("nftables chain %s does not exist yet, will retry", nftChain) + return nil + } + return fmt.Errorf("failed to list nftables rules: %w", err) + } + + actualCIDRs := make(map[string]*knftables.Rule, len(existing)) + for _, r := range existing { + if r.Comment == nil { + continue + } + if cidr := cidrFromNFTComment(*r.Comment); cidr != "" { + actualCIDRs[cidr] = r + } + } + + tx := m.nft.NewTransaction() + changed := false + + for cidr, ruleExpr := range m.desiredCIDRs { + if _, exists := actualCIDRs[cidr]; exists { + continue + } + comment := nftCommentForCIDR(cidr) + tx.Insert(&knftables.Rule{ + Chain: nftChain, + Rule: ruleExpr, + Comment: &comment, + }) + changed = true + klog.V(2).Infof("nftables: inserting bypass rule for %s", cidr) + } + + for cidr, rule := range actualCIDRs { + if _, desired := m.desiredCIDRs[cidr]; desired { + continue + } + tx.Delete(&knftables.Rule{ + Chain: nftChain, + Handle: rule.Handle, + }) + changed = true + klog.V(2).Infof("nftables: removing stale bypass rule for %s", cidr) + } + + if !changed { + return nil + } + + if err := m.nft.Run(ctx, tx); err != nil { + return fmt.Errorf("failed to run nftables transaction: %w", err) + } + return nil +} + +func (m *nftablesManager) cleanup(ctx context.Context) error { + existing, err := m.nft.ListRules(ctx, nftChain) + if err != nil { + if knftables.IsNotFound(err) { + return nil + } + return fmt.Errorf("failed to list nftables rules: %w", err) + } + + tx := m.nft.NewTransaction() + changed := false + for _, r := range existing { + if r.Comment == nil { + continue + } + if cidrFromNFTComment(*r.Comment) != "" { + tx.Delete(&knftables.Rule{ + Chain: nftChain, + Handle: r.Handle, + }) + changed = true + } + } + + if !changed { + return nil + } + + return m.nft.Run(ctx, tx) +} + +func (m *nftablesManager) subscribe(ctx context.Context, reconcileCh chan<- string) (func(), 
error) { + sock, err := nl.Subscribe(unix.NETLINK_NETFILTER, unix.NFNLGRP_NFTABLES) + if err != nil { + return nil, fmt.Errorf("failed to subscribe to nftables events: %w", err) + } + + // Debounce nftables events: reconcile() itself modifies nftables, + // which generates kernel netlink events back into this subscription. + // Without debouncing, this creates a tight reconcile→event→reconcile loop. + rawCh := make(chan struct{}, 1) + + go func() { + defer close(rawCh) + for { + msgs, _, err := sock.Receive() + if err != nil { + klog.V(4).Infof("nftables netlink receive error: %v", err) + return + } + for _, msg := range msgs { + if msg.Header.Type == syscall.NLMSG_DONE || msg.Header.Type == syscall.NLMSG_ERROR { + continue + } + msgType := int(msg.Header.Type) & 0xFF + if msgType == unix.NFT_MSG_NEWRULE || + msgType == unix.NFT_MSG_DELRULE || + msgType == unix.NFT_MSG_NEWCHAIN || + msgType == unix.NFT_MSG_DELCHAIN { + select { + case rawCh <- struct{}{}: + default: + } + } + } + } + }() + + go func() { + var debounce <-chan time.Time + for { + select { + case <-ctx.Done(): + return + case _, ok := <-rawCh: + if !ok { + return + } + debounce = time.After(2 * time.Second) + case <-debounce: + select { + case reconcileCh <- "nftables-change": + default: + } + debounce = nil + } + } + }() + + klog.V(2).Infof("Subscribed to nftables netlink events (NFNLGRP_NFTABLES)") + return sock.Close, nil +} diff --git a/pkg/controllers/c2cc/nftables_test.go b/pkg/controllers/c2cc/nftables_test.go new file mode 100644 index 0000000000..7237a98835 --- /dev/null +++ b/pkg/controllers/c2cc/nftables_test.go @@ -0,0 +1,195 @@ +package c2cc + +import ( + "context" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "sigs.k8s.io/knftables" +) + +func setupFakeNFT(t *testing.T) *knftables.Fake { + t.Helper() + fake := knftables.NewFake(knftables.InetFamily, nftTable) + + tx := fake.NewTransaction() + tx.Add(&knftables.Table{}) + 
tx.Add(&knftables.Chain{Name: nftChain}) + require.NoError(t, fake.Run(context.Background(), tx)) + + return fake +} + +func parseCIDR(t *testing.T, s string) *net.IPNet { + t.Helper() + _, cidr, err := net.ParseCIDR(s) + require.NoError(t, err) + return cidr +} + +func TestNftCommentForCIDR(t *testing.T) { + assert.Equal(t, "c2cc-no-masq:10.45.0.0/16", nftCommentForCIDR("10.45.0.0/16")) + assert.Equal(t, "c2cc-no-masq:fd01::/48", nftCommentForCIDR("fd01::/48")) +} + +func TestCIDRFromNFTComment(t *testing.T) { + tests := []struct { + comment string + expected string + }{ + {"c2cc-no-masq:10.45.0.0/16", "10.45.0.0/16"}, + {"c2cc-no-masq:fd01::/48", "fd01::/48"}, + {"some-other-comment", ""}, + {"", ""}, + } + for _, tt := range tests { + assert.Equal(t, tt.expected, cidrFromNFTComment(tt.comment)) + } +} + +func TestNftablesManager_ReconcileAddsMissingRules(t *testing.T) { + fake := setupFakeNFT(t) + ctx := context.Background() + + cidrs := []*net.IPNet{ + parseCIDR(t, "10.45.0.0/16"), + parseCIDR(t, "10.46.0.0/16"), + } + + mgr := &nftablesManager{ + nft: fake, + desiredCIDRs: make(map[string]string), + } + for _, cidr := range cidrs { + mgr.desiredCIDRs[cidr.String()] = "ip daddr " + cidr.String() + " return" + } + + err := mgr.reconcile(ctx) + require.NoError(t, err) + + rules, err := fake.ListRules(ctx, nftChain) + require.NoError(t, err) + assert.Len(t, rules, 2) + + foundCIDRs := make(map[string]bool) + for _, r := range rules { + require.NotNil(t, r.Comment) + cidr := cidrFromNFTComment(*r.Comment) + foundCIDRs[cidr] = true + } + assert.True(t, foundCIDRs["10.45.0.0/16"]) + assert.True(t, foundCIDRs["10.46.0.0/16"]) +} + +func TestNftablesManager_ReconcileIsIdempotent(t *testing.T) { + fake := setupFakeNFT(t) + ctx := context.Background() + + mgr := &nftablesManager{ + nft: fake, + desiredCIDRs: map[string]string{"10.45.0.0/16": "ip daddr 10.45.0.0/16 return"}, + } + + require.NoError(t, mgr.reconcile(ctx)) + require.NoError(t, mgr.reconcile(ctx)) + + rules, 
err := fake.ListRules(ctx, nftChain) + require.NoError(t, err) + assert.Len(t, rules, 1) +} + +func TestNftablesManager_ReconcileRemovesStaleRules(t *testing.T) { + fake := setupFakeNFT(t) + ctx := context.Background() + + comment := nftCommentForCIDR("10.99.0.0/16") + tx := fake.NewTransaction() + tx.Add(&knftables.Rule{ + Chain: nftChain, + Rule: "ip daddr 10.99.0.0/16 return", + Comment: &comment, + }) + require.NoError(t, fake.Run(ctx, tx)) + + mgr := &nftablesManager{ + nft: fake, + desiredCIDRs: map[string]string{"10.45.0.0/16": "ip daddr 10.45.0.0/16 return"}, + } + + require.NoError(t, mgr.reconcile(ctx)) + + rules, err := fake.ListRules(ctx, nftChain) + require.NoError(t, err) + assert.Len(t, rules, 1) + require.NotNil(t, rules[0].Comment) + assert.Equal(t, "10.45.0.0/16", cidrFromNFTComment(*rules[0].Comment)) +} + +func TestNftablesManager_ReconcileChainNotFound(t *testing.T) { + fake := knftables.NewFake(knftables.InetFamily, nftTable) + ctx := context.Background() + + tx := fake.NewTransaction() + tx.Add(&knftables.Table{}) + require.NoError(t, fake.Run(ctx, tx)) + + mgr := &nftablesManager{ + nft: fake, + desiredCIDRs: map[string]string{"10.45.0.0/16": "ip daddr 10.45.0.0/16 return"}, + } + + err := mgr.reconcile(ctx) + assert.NoError(t, err) +} + +func TestNftablesManager_CleanupRemovesAllC2CCRules(t *testing.T) { + fake := setupFakeNFT(t) + ctx := context.Background() + + tx := fake.NewTransaction() + c1 := nftCommentForCIDR("10.45.0.0/16") + c2 := nftCommentForCIDR("10.46.0.0/16") + nonC2CC := "some-other-rule" + tx.Add(&knftables.Rule{Chain: nftChain, Rule: "ip daddr 10.45.0.0/16 return", Comment: &c1}) + tx.Add(&knftables.Rule{Chain: nftChain, Rule: "ip daddr 10.46.0.0/16 return", Comment: &c2}) + tx.Add(&knftables.Rule{Chain: nftChain, Rule: "ip daddr 10.0.0.0/8 masquerade", Comment: &nonC2CC}) + require.NoError(t, fake.Run(ctx, tx)) + + mgr := &nftablesManager{ + nft: fake, + desiredCIDRs: map[string]string{}, + } + + require.NoError(t, 
mgr.cleanup(ctx)) + + rules, err := fake.ListRules(ctx, nftChain) + require.NoError(t, err) + assert.Len(t, rules, 1, "only non-c2cc rule should remain") + assert.Equal(t, "some-other-rule", *rules[0].Comment) +} + +func TestNftablesManager_IPv6RuleExpression(t *testing.T) { + cidrs := []*net.IPNet{parseCIDR(t, "fd01::/48")} + + fake := setupFakeNFT(t) + mgr := &nftablesManager{ + nft: fake, + desiredCIDRs: make(map[string]string), + } + for _, cidr := range cidrs { + prefix := "ip" + if cidr.IP.To4() == nil { + prefix = "ip6" + } + mgr.desiredCIDRs[cidr.String()] = prefix + " daddr " + cidr.String() + " return" + } + + require.NoError(t, mgr.reconcile(context.Background())) + + rules, err := fake.ListRules(context.Background(), nftChain) + require.NoError(t, err) + require.Len(t, rules, 1) + assert.Contains(t, rules[0].Rule, "ip6 daddr fd01::/48 return") +} diff --git a/pkg/controllers/c2cc/ovn.go b/pkg/controllers/c2cc/ovn.go new file mode 100644 index 0000000000..e23b89a9df --- /dev/null +++ b/pkg/controllers/c2cc/ovn.go @@ -0,0 +1,265 @@ +package c2cc + +import ( + "context" + "errors" + "fmt" + "net" + "strings" + "time" + + "github.com/openshift/microshift/pkg/config" + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + "k8s.io/klog/v2" +) + +const ( + c2ccOwnerController = "microshift-c2cc" + ownerControllerKey = "k8s.ovn.org/owner-controller" + gwRouterPrefix = "GR_" +) + +func buildNamedUUID(prefix, suffix string) string { + r := strings.NewReplacer(".", "_", "/", "_", ":", "_", "-", "_") + return r.Replace(prefix + suffix) +} + +type routeKey struct { + prefix string + nexthop string +} + +type ovnRouteManager struct { + nbClient client.Client + gwRouter string + desired []LogicalRouterStaticRoute +} + +func newOVNRouteManager(nbClient client.Client, nodeName string, resolved []config.ResolvedRemoteCluster) *ovnRouteManager { + 
gwRouter := gwRouterPrefix + nodeName + + var desired []LogicalRouterStaticRoute + for _, rc := range resolved { + nexthop := rc.NextHop.String() + allCIDRs := append([]*net.IPNet{}, rc.ClusterNetwork...) + allCIDRs = append(allCIDRs, rc.ServiceNetwork...) + for _, cidr := range allCIDRs { + desired = append(desired, LogicalRouterStaticRoute{ + IPPrefix: cidr.String(), + Nexthop: nexthop, + ExternalIDs: map[string]string{ownerControllerKey: c2ccOwnerController}, + }) + } + } + + return &ovnRouteManager{ + nbClient: nbClient, + gwRouter: gwRouter, + desired: desired, + } +} + +func (m *ovnRouteManager) reconcile(ctx context.Context) error { + actual, err := m.listC2CCRoutes(ctx) + if err != nil { + return fmt.Errorf("failed to list OVN routes: %w", err) + } + + actualByKey := make(map[routeKey]*LogicalRouterStaticRoute, len(actual)) + for i := range actual { + k := routeKey{prefix: actual[i].IPPrefix, nexthop: actual[i].Nexthop} + actualByKey[k] = &actual[i] + } + + desiredKeys := make(map[routeKey]bool, len(m.desired)) + for _, d := range m.desired { + desiredKeys[routeKey{prefix: d.IPPrefix, nexthop: d.Nexthop}] = true + } + + var ops []ovsdb.Operation + + for _, d := range m.desired { + k := routeKey{prefix: d.IPPrefix, nexthop: d.Nexthop} + if _, exists := actualByKey[k]; exists { + continue + } + + route := d + route.UUID = buildNamedUUID("c2cc_route_", route.IPPrefix) + createOps, err := m.nbClient.Create(&route) + if err != nil { + return fmt.Errorf("failed to create route %s via %s: %w", route.IPPrefix, route.Nexthop, err) + } + ops = append(ops, createOps...) + + router := &LogicalRouter{Name: m.gwRouter} + mutateOps, err := m.nbClient.Where(router).Mutate(router, model.Mutation{ + Field: &router.StaticRoutes, + Mutator: ovsdb.MutateOperationInsert, + Value: []string{route.UUID}, + }) + if err != nil { + return fmt.Errorf("failed to mutate router for route %s: %w", route.IPPrefix, err) + } + ops = append(ops, mutateOps...) 
+ + klog.V(2).Infof("OVN route add: %s via %s on %s", route.IPPrefix, route.Nexthop, m.gwRouter) + } + + for k, existing := range actualByKey { + if desiredKeys[k] { + continue + } + + router := &LogicalRouter{Name: m.gwRouter} + mutateOps, err := m.nbClient.Where(router).Mutate(router, model.Mutation{ + Field: &router.StaticRoutes, + Mutator: ovsdb.MutateOperationDelete, + Value: []string{existing.UUID}, + }) + if err != nil { + return fmt.Errorf("failed to mutate router to remove route %s: %w", existing.UUID, err) + } + ops = append(ops, mutateOps...) + + deleteOps, err := m.nbClient.Where(existing).Delete() + if err != nil { + return fmt.Errorf("failed to delete route %s: %w", existing.UUID, err) + } + ops = append(ops, deleteOps...) + + klog.V(2).Infof("OVN route remove: %s via %s from %s", existing.IPPrefix, existing.Nexthop, m.gwRouter) + } + + if len(ops) == 0 { + return nil + } + + results, err := m.nbClient.Transact(ctx, ops...) + if err != nil { + return fmt.Errorf("failed to transact OVN: %w", err) + } + for _, r := range results { + if r.Error != "" { + return fmt.Errorf("failed to transact OVN: %s (%s)", r.Error, r.Details) + } + } + + return nil +} + +func (m *ovnRouteManager) cleanup(ctx context.Context) error { + routes, err := m.listC2CCRoutes(ctx) + if err != nil { + return err + } + + var errs []error + for _, r := range routes { + route := r + router := &LogicalRouter{Name: m.gwRouter} + + ops := make([]ovsdb.Operation, 0, 2) + mutateOps, err := m.nbClient.Where(router).Mutate(router, model.Mutation{ + Field: &router.StaticRoutes, + Mutator: ovsdb.MutateOperationDelete, + Value: []string{route.UUID}, + }) + if err != nil { + klog.Errorf("Failed to build mutate for route %s: %v", route.UUID, err) + errs = append(errs, err) + continue + } + ops = append(ops, mutateOps...) 
+ + delOps, err := m.nbClient.Where(&route).Delete() + if err != nil { + klog.Errorf("Failed to build delete for route %s: %v", route.UUID, err) + errs = append(errs, err) + continue + } + ops = append(ops, delOps...) + + results, err := m.nbClient.Transact(ctx, ops...) + if err != nil { + klog.Errorf("Failed to remove OVN route %s: %v", route.UUID, err) + errs = append(errs, err) + continue + } + for _, res := range results { + if res.Error != "" { + opErr := fmt.Errorf("failed to remove OVN route %s: %s (%s)", route.UUID, res.Error, res.Details) + klog.Errorf("%v", opErr) + errs = append(errs, opErr) + } + } + } + return errors.Join(errs...) +} + +func (m *ovnRouteManager) subscribe(ctx context.Context, reconcileCh chan<- string) { + m.nbClient.Cache().AddEventHandler(&cache.EventHandlerFuncs{ + UpdateFunc: func(table string, old, updated model.Model) { + if table != "Logical_Router_Static_Route" { + return + } + if route, ok := updated.(*LogicalRouterStaticRoute); ok && route.ExternalIDs[ownerControllerKey] == c2ccOwnerController { + select { + case reconcileCh <- "ovn-route-updated": + default: + } + } + }, + DeleteFunc: func(table string, old model.Model) { + if table != "Logical_Router_Static_Route" { + return + } + if route, ok := old.(*LogicalRouterStaticRoute); ok && route.ExternalIDs[ownerControllerKey] == c2ccOwnerController { + select { + case reconcileCh <- "ovn-route-deleted": + default: + } + } + }, + }) + klog.V(2).Infof("Subscribed to OVN NB cache events for C2CC routes") + + go func() { + for { + disconnectCh := m.nbClient.DisconnectNotify() + select { + case <-ctx.Done(): + return + case <-disconnectCh: + klog.Warningf("OVN NB connection lost, waiting for reconnect...") + for !m.nbClient.Connected() { + select { + case <-ctx.Done(): + return + case <-time.After(1 * time.Second): + } + } + klog.Infof("OVN NB reconnected, triggering full reconcile") + select { + case reconcileCh <- "ovn-reconnected": + default: + } + } + } + }() + 
klog.V(2).Infof("Subscribed to OVN NB disconnect notifications") +} + +func (m *ovnRouteManager) listC2CCRoutes(ctx context.Context) ([]LogicalRouterStaticRoute, error) { + var routes []LogicalRouterStaticRoute + err := m.nbClient.WhereCache(func(r *LogicalRouterStaticRoute) bool { + return r.ExternalIDs[ownerControllerKey] == c2ccOwnerController + }).List(ctx, &routes) + if err != nil { + return nil, err + } + return routes, nil +} diff --git a/pkg/controllers/c2cc/ovn_test.go b/pkg/controllers/c2cc/ovn_test.go new file mode 100644 index 0000000000..9ba1011e26 --- /dev/null +++ b/pkg/controllers/c2cc/ovn_test.go @@ -0,0 +1,82 @@ +package c2cc + +import ( + "net" + "testing" + + "github.com/openshift/microshift/pkg/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewOVNRouteManager_DesiredRoutes(t *testing.T) { + resolved := []config.ResolvedRemoteCluster{ + { + NextHop: net.ParseIP("192.168.1.10"), + ClusterNetwork: []*net.IPNet{parseCIDR(t, "10.45.0.0/16")}, + ServiceNetwork: []*net.IPNet{parseCIDR(t, "10.46.0.0/16")}, + }, + } + + mgr := newOVNRouteManager(nil, "test-node", resolved) + + assert.Equal(t, "GR_test-node", mgr.gwRouter) + require.Len(t, mgr.desired, 2) + + assert.Equal(t, "10.45.0.0/16", mgr.desired[0].IPPrefix) + assert.Equal(t, "192.168.1.10", mgr.desired[0].Nexthop) + assert.Equal(t, c2ccOwnerController, mgr.desired[0].ExternalIDs[ownerControllerKey]) + + assert.Equal(t, "10.46.0.0/16", mgr.desired[1].IPPrefix) + assert.Equal(t, "192.168.1.10", mgr.desired[1].Nexthop) +} + +func TestNewOVNRouteManager_MultipleRemotes(t *testing.T) { + resolved := []config.ResolvedRemoteCluster{ + { + NextHop: net.ParseIP("192.168.1.10"), + ClusterNetwork: []*net.IPNet{parseCIDR(t, "10.45.0.0/16")}, + ServiceNetwork: []*net.IPNet{parseCIDR(t, "10.46.0.0/16")}, + }, + { + NextHop: net.ParseIP("192.168.1.20"), + ClusterNetwork: []*net.IPNet{parseCIDR(t, "10.55.0.0/16")}, + ServiceNetwork: []*net.IPNet{parseCIDR(t, 
"10.56.0.0/16")}, + }, + } + + mgr := newOVNRouteManager(nil, "node-a", resolved) + + assert.Equal(t, "GR_node-a", mgr.gwRouter) + require.Len(t, mgr.desired, 4) + + nexthops := make(map[string]string) + for _, d := range mgr.desired { + nexthops[d.IPPrefix] = d.Nexthop + } + assert.Equal(t, "192.168.1.10", nexthops["10.45.0.0/16"]) + assert.Equal(t, "192.168.1.10", nexthops["10.46.0.0/16"]) + assert.Equal(t, "192.168.1.20", nexthops["10.55.0.0/16"]) + assert.Equal(t, "192.168.1.20", nexthops["10.56.0.0/16"]) +} + +func TestNewOVNRouteManager_EmptyResolved(t *testing.T) { + mgr := newOVNRouteManager(nil, "test-node", nil) + assert.Empty(t, mgr.desired) + assert.Equal(t, "GR_test-node", mgr.gwRouter) +} + +func TestOVNRouteManager_OwnerTag(t *testing.T) { + resolved := []config.ResolvedRemoteCluster{ + { + NextHop: net.ParseIP("192.168.1.10"), + ClusterNetwork: []*net.IPNet{parseCIDR(t, "10.45.0.0/16")}, + }, + } + mgr := newOVNRouteManager(nil, "test-node", resolved) + + for _, d := range mgr.desired { + assert.Equal(t, "microshift-c2cc", d.ExternalIDs[ownerControllerKey], + "all routes must have the owner controller tag") + } +} diff --git a/pkg/controllers/c2cc/policy_routes.go b/pkg/controllers/c2cc/policy_routes.go new file mode 100644 index 0000000000..11507dea1b --- /dev/null +++ b/pkg/controllers/c2cc/policy_routes.go @@ -0,0 +1,129 @@ +package c2cc + +import ( + "errors" + "fmt" + "net" + + "github.com/vishvananda/netlink" + "k8s.io/klog/v2" +) + +type policyRouteTable struct { + table int + proto int + priority int +} + +func (t *policyRouteTable) reconcileRoutes(desired []netlink.Route) error { + actual, err := netlink.RouteListFiltered(netlink.FAMILY_ALL, &netlink.Route{ + Table: t.table, + Protocol: netlink.RouteProtocol(t.proto), + }, netlink.RT_FILTER_TABLE|netlink.RT_FILTER_PROTOCOL) + if err != nil { + return fmt.Errorf("failed to list table %d routes: %w", t.table, err) + } + + actualByDst := make(map[string]netlink.Route, len(actual)) + for _, r 
:= range actual { + if r.Dst != nil { + actualByDst[r.Dst.String()] = r + } + } + + var errs []error + desiredByDst := make(map[string]bool, len(desired)) + for _, r := range desired { + dst := r.Dst.String() + desiredByDst[dst] = true + route := r + if actual, exists := actualByDst[dst]; exists && actual.Gw.Equal(route.Gw) && (route.LinkIndex == 0 || actual.LinkIndex == route.LinkIndex) { + continue + } + if err := netlink.RouteReplace(&route); err != nil { + klog.Errorf("Failed to add route to %s via %s: %v", dst, route.Gw, err) + errs = append(errs, fmt.Errorf("failed to add route %s: %w", dst, err)) + continue + } + klog.V(2).Infof("Route add: %s via %s table %d", dst, route.Gw, t.table) + } + + for dst, r := range actualByDst { + if desiredByDst[dst] { + continue + } + route := r + if err := netlink.RouteDel(&route); err != nil { + klog.Errorf("Failed to delete stale route %s: %v", dst, err) + errs = append(errs, fmt.Errorf("failed to delete route %s: %w", dst, err)) + continue + } + klog.V(2).Infof("Route del: %s table %d (stale)", dst, t.table) + } + + return errors.Join(errs...) 
+} + +func (t *policyRouteTable) cleanupRoutes() error { + routes, err := netlink.RouteListFiltered(netlink.FAMILY_ALL, &netlink.Route{ + Table: t.table, + Protocol: netlink.RouteProtocol(t.proto), + }, netlink.RT_FILTER_TABLE|netlink.RT_FILTER_PROTOCOL) + if err != nil { + return fmt.Errorf("failed to list table %d routes for cleanup: %w", t.table, err) + } + for _, r := range routes { + route := r + if err := netlink.RouteDel(&route); err != nil { + klog.Errorf("Failed to cleanup route %v: %v", r.Dst, err) + } + } + return nil +} + +func (t *policyRouteTable) cleanupRules() error { + allRules, err := netlink.RuleList(netlink.FAMILY_ALL) + if err != nil { + return fmt.Errorf("failed to list ip rules for cleanup: %w", err) + } + for _, r := range allRules { + if r.Priority == t.priority && r.Table == t.table { + rule := r + if err := netlink.RuleDel(&rule); err != nil { + klog.Errorf("Failed to cleanup ip rule: %v", err) + } + } + } + return nil +} + +func (t *policyRouteTable) subscribe(reconcileCh chan<- string, reason string) (chan struct{}, error) { + routeUpdates := make(chan netlink.RouteUpdate, 100) + done := make(chan struct{}) + + if err := netlink.RouteSubscribe(routeUpdates, done); err != nil { + return nil, fmt.Errorf("failed to subscribe to route events: %w", err) + } + + go func() { + for update := range routeUpdates { + if update.Table != t.table { + continue + } + select { + case reconcileCh <- reason: + default: + } + } + }() + + klog.V(2).Infof("Subscribed to netlink route events for table %d", t.table) + return done, nil +} + +func ipFamilyOf(cidr *net.IPNet) int { + if cidr.IP.To4() != nil { + return netlink.FAMILY_V4 + } + return netlink.FAMILY_V6 +} diff --git a/pkg/controllers/c2cc/policy_routes_test.go b/pkg/controllers/c2cc/policy_routes_test.go new file mode 100644 index 0000000000..87028ada6b --- /dev/null +++ b/pkg/controllers/c2cc/policy_routes_test.go @@ -0,0 +1,27 @@ +package c2cc + +import ( + "testing" + + 
"github.com/stretchr/testify/assert" + "github.com/vishvananda/netlink" +) + +func TestIPFamilyOf(t *testing.T) { + tests := []struct { + cidr string + expected int + }{ + {"10.45.0.0/16", netlink.FAMILY_V4}, + {"192.168.1.0/24", netlink.FAMILY_V4}, + {"fd01::/48", netlink.FAMILY_V6}, + {"::1/128", netlink.FAMILY_V6}, + } + + for _, tt := range tests { + t.Run(tt.cidr, func(t *testing.T) { + cidr := parseCIDR(t, tt.cidr) + assert.Equal(t, tt.expected, ipFamilyOf(cidr)) + }) + } +} diff --git a/pkg/controllers/c2cc/routes.go b/pkg/controllers/c2cc/routes.go new file mode 100644 index 0000000000..977697bb3a --- /dev/null +++ b/pkg/controllers/c2cc/routes.go @@ -0,0 +1,122 @@ +package c2cc + +import ( + "context" + "errors" + "fmt" + "net" + "syscall" + + "github.com/openshift/microshift/pkg/config" + "github.com/vishvananda/netlink" + "k8s.io/klog/v2" +) + +const ( + c2ccRouteTable = 200 + c2ccRouteProto = 200 + c2ccRulePriority = 100 +) + +type linuxRouteManager struct { + policyRouteTable + + desiredDsts []*net.IPNet + desiredGWs map[string]net.IP +} + +func newLinuxRouteManager(cfg *config.Config) *linuxRouteManager { + m := &linuxRouteManager{ + policyRouteTable: policyRouteTable{ + table: c2ccRouteTable, + proto: c2ccRouteProto, + priority: c2ccRulePriority, + }, + desiredGWs: make(map[string]net.IP), + } + + for _, rc := range cfg.C2CC.Resolved { + allCIDRs := append([]*net.IPNet{}, rc.ClusterNetwork...) + allCIDRs = append(allCIDRs, rc.ServiceNetwork...) 
+ for _, cidr := range allCIDRs { + m.desiredDsts = append(m.desiredDsts, cidr) + m.desiredGWs[cidr.String()] = rc.NextHop + } + } + + return m +} + +func (m *linuxRouteManager) reconcile(ctx context.Context) error { + desired := make([]netlink.Route, 0, len(m.desiredDsts)) + for _, cidr := range m.desiredDsts { + desired = append(desired, netlink.Route{ + Dst: cidr, + Gw: m.desiredGWs[cidr.String()], + Table: m.table, + Protocol: netlink.RouteProtocol(m.proto), + }) + } + + if err := m.reconcileRoutes(desired); err != nil { + return fmt.Errorf("failed to reconcile linux routes: %w", err) + } + if err := m.reconcileRules(); err != nil { + return fmt.Errorf("failed to reconcile ip rules: %w", err) + } + return nil +} + +func (m *linuxRouteManager) reconcileRules() error { + allRules, err := netlink.RuleList(netlink.FAMILY_ALL) + if err != nil { + return fmt.Errorf("failed to list ip rules: %w", err) + } + + actualByDst := make(map[string]netlink.Rule) + for _, r := range allRules { + if r.Priority == m.priority && r.Table == m.table && r.Dst != nil { + actualByDst[r.Dst.String()] = r + } + } + + var errs []error + for _, cidr := range m.desiredDsts { + dst := cidr.String() + if _, exists := actualByDst[dst]; exists { + delete(actualByDst, dst) + continue + } + rule := netlink.NewRule() + rule.Dst = cidr + rule.Table = m.table + rule.Priority = m.priority + rule.Family = ipFamilyOf(cidr) + if err := netlink.RuleAdd(rule); err != nil { + if !errors.Is(err, syscall.EEXIST) { + klog.Errorf("Failed to add ip rule for %s: %v", dst, err) + errs = append(errs, fmt.Errorf("failed to add rule %s: %w", dst, err)) + } + continue + } + klog.V(2).Infof("IP rule add: to %s lookup %d priority %d", dst, m.table, m.priority) + } + + for dst, r := range actualByDst { + rule := r + if err := netlink.RuleDel(&rule); err != nil { + klog.Errorf("Failed to delete stale ip rule for %s: %v", dst, err) + errs = append(errs, fmt.Errorf("failed to delete rule %s: %w", dst, err)) + continue + } 
+ klog.V(2).Infof("IP rule del: to %s (stale)", dst) + } + + return errors.Join(errs...) +} + +func (m *linuxRouteManager) cleanup(ctx context.Context) error { + _ = m.cleanupRoutes() + _ = m.cleanupRules() + return nil +} diff --git a/pkg/controllers/c2cc/routes_test.go b/pkg/controllers/c2cc/routes_test.go new file mode 100644 index 0000000000..a865871db8 --- /dev/null +++ b/pkg/controllers/c2cc/routes_test.go @@ -0,0 +1,43 @@ +package c2cc + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewLinuxRouteManager_DesiredState(t *testing.T) { + cfg := testConfigWithRemotes(t, + testRemote("192.168.1.10", []string{"10.45.0.0/16"}, []string{"10.46.0.0/16"}), + ) + + mgr := newLinuxRouteManager(cfg) + + require.Len(t, mgr.desiredDsts, 2) + assert.Equal(t, "10.45.0.0/16", mgr.desiredDsts[0].String()) + assert.Equal(t, "10.46.0.0/16", mgr.desiredDsts[1].String()) + assert.Equal(t, net.ParseIP("192.168.1.10").To4(), mgr.desiredGWs["10.45.0.0/16"].To4()) + assert.Equal(t, net.ParseIP("192.168.1.10").To4(), mgr.desiredGWs["10.46.0.0/16"].To4()) +} + +func TestNewLinuxRouteManager_MultipleRemotes(t *testing.T) { + cfg := testConfigWithRemotes(t, + testRemote("192.168.1.10", []string{"10.45.0.0/16"}, []string{"10.46.0.0/16"}), + testRemote("192.168.1.20", []string{"10.55.0.0/16"}, []string{"10.56.0.0/16"}), + ) + + mgr := newLinuxRouteManager(cfg) + + require.Len(t, mgr.desiredDsts, 4) + assert.Equal(t, net.ParseIP("192.168.1.10").To4(), mgr.desiredGWs["10.45.0.0/16"].To4()) + assert.Equal(t, net.ParseIP("192.168.1.20").To4(), mgr.desiredGWs["10.55.0.0/16"].To4()) +} + +func TestNewLinuxRouteManager_EmptyConfig(t *testing.T) { + cfg := testConfigWithRemotes(t) + mgr := newLinuxRouteManager(cfg) + assert.Empty(t, mgr.desiredDsts) + assert.Empty(t, mgr.desiredGWs) +} diff --git a/pkg/controllers/c2cc/service_routes.go b/pkg/controllers/c2cc/service_routes.go new file mode 100644 index 
0000000000..fcf43030f3 --- /dev/null +++ b/pkg/controllers/c2cc/service_routes.go @@ -0,0 +1,211 @@ +package c2cc + +import ( + "context" + "errors" + "fmt" + "net" + "syscall" + + "github.com/openshift/microshift/pkg/config" + "github.com/vishvananda/netlink" + "k8s.io/klog/v2" +) + +const ( + c2ccSvcRouteTable = 201 + c2ccSvcRouteProto = 201 + c2ccSvcRulePriority = 99 + mgmtPortInterface = "ovn-k8s-mp0" +) + +type serviceRouteManager struct { + policyRouteTable + + remoteCIDRs []*net.IPNet + localSvcCIDRs []*net.IPNet +} + +func newServiceRouteManager(cfg *config.Config) *serviceRouteManager { + remoteCIDRs := make([]*net.IPNet, 0, len(cfg.C2CC.Resolved)*4) + for _, rc := range cfg.C2CC.Resolved { + remoteCIDRs = append(remoteCIDRs, rc.ClusterNetwork...) + remoteCIDRs = append(remoteCIDRs, rc.ServiceNetwork...) + } + + var localSvcCIDRs []*net.IPNet + for _, s := range cfg.Network.ServiceNetwork { + _, ipNet, err := net.ParseCIDR(s) + if err != nil { + klog.Warningf("Invalid service network CIDR %q: %v", s, err) + continue + } + localSvcCIDRs = append(localSvcCIDRs, ipNet) + } + + return &serviceRouteManager{ + policyRouteTable: policyRouteTable{ + table: c2ccSvcRouteTable, + proto: c2ccSvcRouteProto, + priority: c2ccSvcRulePriority, + }, + remoteCIDRs: remoteCIDRs, + localSvcCIDRs: localSvcCIDRs, + } +} + +func (m *serviceRouteManager) reconcile(ctx context.Context) error { + gateways, err := getMgmtPortGateways() + if err != nil { + klog.V(4).Infof("Management port not ready: %v, will retry", err) + return nil + } + + var desired []netlink.Route + for _, svcCIDR := range m.localSvcCIDRs { + family := ipFamilyOf(svcCIDR) + gw, ok := gateways[family] + if !ok { + klog.V(4).Infof("No %s gateway on %s for service CIDR %s, skipping", familyName(family), mgmtPortInterface, svcCIDR) + continue + } + desired = append(desired, netlink.Route{ + Dst: svcCIDR, + Gw: gw.ip, + Table: m.table, + Protocol: netlink.RouteProtocol(m.proto), + LinkIndex: gw.linkIdx, + }) + } + + 
if err := m.reconcileRoutes(desired); err != nil { + return fmt.Errorf("failed to reconcile service routes: %w", err) + } + if err := m.reconcileRules(); err != nil { + return fmt.Errorf("failed to reconcile service rules: %w", err) + } + return nil +} + +func (m *serviceRouteManager) reconcileRules() error { + type ruleKey struct { + src string + dst string + } + + var desired []netlink.Rule + desiredKeys := make(map[ruleKey]bool) + var errs []error + for _, remoteCIDR := range m.remoteCIDRs { + for _, svcCIDR := range m.localSvcCIDRs { + if ipFamilyOf(remoteCIDR) != ipFamilyOf(svcCIDR) { + continue + } + rule := netlink.NewRule() + rule.Src = remoteCIDR + rule.Dst = svcCIDR + rule.Table = m.table + rule.Priority = m.priority + rule.Family = ipFamilyOf(remoteCIDR) + desired = append(desired, *rule) + desiredKeys[ruleKey{src: remoteCIDR.String(), dst: svcCIDR.String()}] = true + } + } + + allRules, err := netlink.RuleList(netlink.FAMILY_ALL) + if err != nil { + return fmt.Errorf("failed to list ip rules: %w", err) + } + + actualKeys := make(map[ruleKey]netlink.Rule) + for _, r := range allRules { + if r.Priority == m.priority && r.Table == m.table && r.Src != nil && r.Dst != nil { + actualKeys[ruleKey{src: r.Src.String(), dst: r.Dst.String()}] = r + } + } + + for _, r := range desired { + k := ruleKey{src: r.Src.String(), dst: r.Dst.String()} + if _, exists := actualKeys[k]; exists { + continue + } + rule := r + if err := netlink.RuleAdd(&rule); err != nil { + if !errors.Is(err, syscall.EEXIST) { + klog.Errorf("Failed to add service ip rule from %s to %s: %v", rule.Src, rule.Dst, err) + errs = append(errs, fmt.Errorf("failed to add service ip rule from %s to %s: %w", rule.Src, rule.Dst, err)) + } + continue + } + klog.V(2).Infof("Service rule add: from %s to %s lookup %d", rule.Src, rule.Dst, m.table) + } + + for k, r := range actualKeys { + if desiredKeys[k] { + continue + } + rule := r + if err := netlink.RuleDel(&rule); err != nil { + klog.Errorf("Failed to 
delete stale service rule from %s to %s: %v", k.src, k.dst, err) + errs = append(errs, fmt.Errorf("failed to delete service ip rule from %s to %s: %w", k.src, k.dst, err)) + } + } + + return errors.Join(errs...) +} + +func (m *serviceRouteManager) cleanup(ctx context.Context) error { + _ = m.cleanupRoutes() + _ = m.cleanupRules() + return nil +} + +type mgmtPortGateway struct { + ip net.IP + linkIdx int +} + +func getMgmtPortGateways() (map[int]mgmtPortGateway, error) { + link, err := netlink.LinkByName(mgmtPortInterface) + if err != nil { + return nil, fmt.Errorf("failed to get %s: %w", mgmtPortInterface, err) + } + + addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL) + if err != nil { + return nil, fmt.Errorf("failed to list addresses on %s: %w", mgmtPortInterface, err) + } + + gateways := make(map[int]mgmtPortGateway) + linkIdx := link.Attrs().Index + + for _, addr := range addrs { + if addr.IP.To4() != nil { + ip4 := addr.IP.To4() + gwIP := make(net.IP, len(ip4)) + copy(gwIP, ip4) + gwIP = gwIP.Mask(addr.Mask) + gwIP[len(gwIP)-1] = 1 + gateways[netlink.FAMILY_V4] = mgmtPortGateway{ip: gwIP, linkIdx: linkIdx} + } else if addr.IP.To16() != nil { + ip6 := make(net.IP, len(addr.IP.To16())) + copy(ip6, addr.IP.To16()) + ip6 = ip6.Mask(addr.Mask) + ip6[len(ip6)-1] = 1 + gateways[netlink.FAMILY_V6] = mgmtPortGateway{ip: ip6, linkIdx: linkIdx} + } + } + + if len(gateways) == 0 { + return nil, fmt.Errorf("failed to find addresses on %s", mgmtPortInterface) + } + + return gateways, nil +} + +func familyName(family int) string { + if family == netlink.FAMILY_V4 { + return "IPv4" + } + return "IPv6" +} diff --git a/pkg/controllers/c2cc/service_routes_test.go b/pkg/controllers/c2cc/service_routes_test.go new file mode 100644 index 0000000000..d228dd1956 --- /dev/null +++ b/pkg/controllers/c2cc/service_routes_test.go @@ -0,0 +1,40 @@ +package c2cc + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func 
TestNewServiceRouteManager_DesiredState(t *testing.T) { + cfg := testConfigWithRemotes(t, + testRemote("192.168.1.10", []string{"10.45.0.0/16"}, []string{"10.46.0.0/16"}), + ) + + mgr := newServiceRouteManager(cfg) + + require.Len(t, mgr.remoteCIDRs, 2) + assert.Equal(t, "10.45.0.0/16", mgr.remoteCIDRs[0].String()) + assert.Equal(t, "10.46.0.0/16", mgr.remoteCIDRs[1].String()) + + require.Len(t, mgr.localSvcCIDRs, 1) + assert.Equal(t, "10.43.0.0/16", mgr.localSvcCIDRs[0].String()) +} + +func TestNewServiceRouteManager_MultipleRemotes(t *testing.T) { + cfg := testConfigWithRemotes(t, + testRemote("192.168.1.10", []string{"10.45.0.0/16"}, []string{"10.46.0.0/16"}), + testRemote("192.168.1.20", []string{"10.55.0.0/16"}, []string{"10.56.0.0/16"}), + ) + + mgr := newServiceRouteManager(cfg) + assert.Len(t, mgr.remoteCIDRs, 4) + assert.Len(t, mgr.localSvcCIDRs, 1) +} + +func TestNewServiceRouteManager_EmptyConfig(t *testing.T) { + cfg := testConfigWithRemotes(t) + mgr := newServiceRouteManager(cfg) + assert.Empty(t, mgr.remoteCIDRs) +} diff --git a/test/assets/c2cc/curl-pod.yaml b/test/assets/c2cc/curl-pod.yaml new file mode 100644 index 0000000000..c633b1df3c --- /dev/null +++ b/test/assets/c2cc/curl-pod.yaml @@ -0,0 +1,19 @@ +kind: Pod +apiVersion: v1 +metadata: + name: curl-pod +spec: + terminationGracePeriodSeconds: 0 + containers: + - name: curl + image: quay.io/curl/curl:8.14.1 + command: ["sleep", "infinity"] + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault diff --git a/test/assets/c2cc/hello-microshift.yaml b/test/assets/c2cc/hello-microshift.yaml new file mode 100644 index 0000000000..6169449b78 --- /dev/null +++ b/test/assets/c2cc/hello-microshift.yaml @@ -0,0 +1,41 @@ +kind: Pod +apiVersion: v1 +metadata: + name: hello-microshift + labels: + app: hello-microshift +spec: + terminationGracePeriodSeconds: 0 + containers: + - name: hello-microshift 
+ image: quay.io/microshift/busybox:1.36 + command: ["/bin/sh"] + args: ["-c", "while true; do echo -ne \"HTTP/1.0 200 OK\r\nContent-Length: 16\r\n\r\nHello MicroShift\" | nc -l -p 8080 ; done"] + ports: + - containerPort: 8080 + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsUser: 1001 + runAsGroup: 1001 + seccompProfile: + type: RuntimeDefault +--- +apiVersion: v1 +kind: Service +metadata: + name: hello-microshift + labels: + app: hello-microshift +spec: + selector: + app: hello-microshift + ports: + - port: 8080 + targetPort: 8080 + protocol: TCP + type: ClusterIP diff --git a/test/bin/scenario.sh b/test/bin/scenario.sh index 67b8963bc6..2053b7b276 100755 --- a/test/bin/scenario.sh +++ b/test/bin/scenario.sh @@ -1224,6 +1224,27 @@ USHIFT_USER: "${USHIFT_USER:-redhat}" SSH_PRIV_KEY: "${SSH_PRIVATE_KEY:-}" SSH_PORT: ${ssh_port} EOF + # Populate variables for additional VMs in this scenario + local vms_dir="${SCENARIO_INFO_DIR}/${SCENARIO}/vms" + for vm_dir in "${vms_dir}"/*/; do + [ -d "${vm_dir}" ] || continue + local other_vm + other_vm=$(basename "${vm_dir}") + [ "${other_vm}" = "${vmname}" ] && continue + + local var_prefix + var_prefix=$(echo "${other_vm}" | tr '[:lower:]-' '[:upper:]_') + for prop in ip ssh_port api_port lb_port; do + local prop_file="${vm_dir}/${prop}" + [ -f "${prop_file}" ] || continue + local val + val=$(cat "${prop_file}") + local var_name + var_name="${var_prefix}_$(echo "${prop}" | tr '[:lower:]' '[:upper:]')" + echo "${var_name}: ${val}" | tee -a "${variable_file}" + done + done + wait_for_microshift_to_be_ready "${vmname}" fi diff --git a/test/resources/c2cc.resource b/test/resources/c2cc.resource new file mode 100644 index 0000000000..466f463a19 --- /dev/null +++ b/test/resources/c2cc.resource @@ -0,0 +1,232 @@ +*** Settings *** +Documentation Keywords for multi-cluster C2CC test scenarios. +... 
Provides SSH and oc access to N clusters via alias-based registration. + +Library Collections +Library OperatingSystem +Library Process +Library String +Library SSHLibrary +Resource common.resource +Resource microshift-host.resource + + +*** Variables *** +${CLUSTER_A_POD_CIDR} ${EMPTY} +${CLUSTER_A_SVC_CIDR} ${EMPTY} +${CLUSTER_A_DOMAIN} ${EMPTY} +${CLUSTER_B_POD_CIDR} ${EMPTY} +${CLUSTER_B_SVC_CIDR} ${EMPTY} +${CLUSTER_B_DOMAIN} ${EMPTY} +${HOST2_IP} ${EMPTY} +${HOST2_SSH_PORT} ${EMPTY} +${HOST2_API_PORT} ${EMPTY} +${KUBECONFIG_B} ${EMPTY} +&{C2CC_KUBECONFIGS} &{EMPTY} +&{C2CC_SSH_IDS} &{EMPTY} +@{C2CC_REMOTE_ALIASES} @{EMPTY} + + +*** Keywords *** +Register Local Cluster + [Documentation] Register the primary cluster (host1) for use with generic cluster keywords. + ... Must be called after Login MicroShift Host and Setup Kubeconfig. + [Arguments] ${alias} + ${conn}= SSHLibrary.Get Connection + Set To Dictionary ${C2CC_KUBECONFIGS} ${alias} ${KUBECONFIG} + Set To Dictionary ${C2CC_SSH_IDS} ${alias} ${conn.index} + +Register Remote Cluster + [Documentation] Open an SSH connection to a remote cluster and register it by alias. + [Arguments] ${alias} ${host} ${ssh_port} ${kubeconfig} + IF '${ssh_port}' + SSHLibrary.Open Connection ${host} alias=${alias} port=${ssh_port} + ELSE + SSHLibrary.Open Connection ${host} alias=${alias} + END + IF '${SSH_PRIV_KEY}' + SSHLibrary.Login With Public Key ${USHIFT_USER} ${SSH_PRIV_KEY} keep_alive_interval=30 + ELSE + SSHLibrary.Login ${USHIFT_USER} allow_agent=True keep_alive_interval=30 + END + Set To Dictionary ${C2CC_KUBECONFIGS} ${alias} ${kubeconfig} + Set To Dictionary ${C2CC_SSH_IDS} ${alias} ${alias} + Append To List ${C2CC_REMOTE_ALIASES} ${alias} + +Teardown All Remote Clusters + [Documentation] Close SSH connections for all registered remote clusters + ... and switch back to the local cluster connection so that + ... Logout MicroShift Host can close it. 
+ FOR ${alias} IN @{C2CC_REMOTE_ALIASES} + SSHLibrary.Switch Connection ${alias} + SSHLibrary.Close Connection + END + VAR @{C2CC_REMOTE_ALIASES}= @{EMPTY} scope=SUITE + ${local_conn}= Get From Dictionary ${C2CC_SSH_IDS} cluster-a + SSHLibrary.Switch Connection ${local_conn} + +Command On Cluster + [Documentation] Run a shell command on the specified cluster via SSH. + [Arguments] ${alias} ${command} ${sudo_mode}=True + ${conn_id}= Get From Dictionary ${C2CC_SSH_IDS} ${alias} + SSHLibrary.Switch Connection ${conn_id} + ${stdout} ${stderr} ${rc}= SSHLibrary.Execute Command + ... ${command} + ... sudo=${sudo_mode} + ... return_rc=True + ... return_stderr=True + ... return_stdout=True + Should Be Equal As Integers ${rc} 0 + RETURN ${stdout} + +Disruptive Command On Cluster + [Documentation] Run a shell command on the specified cluster via SSH. + ... Does NOT assert rc=0 — intended for fault-injection commands + ... where the command may race with the controller's reconcile loop. + [Arguments] ${alias} ${command} ${sudo_mode}=True + ${conn_id}= Get From Dictionary ${C2CC_SSH_IDS} ${alias} + SSHLibrary.Switch Connection ${conn_id} + ${stdout} ${stderr} ${rc}= SSHLibrary.Execute Command + ... ${command} + ... sudo=${sudo_mode} + ... return_rc=True + ... return_stderr=True + ... return_stdout=True + Log Disruptive command rc=${rc} stdout=${stdout} stderr=${stderr} + RETURN ${stdout} + +Oc On Cluster + [Documentation] Run an oc command against the specified cluster using its kubeconfig. + [Arguments] ${alias} ${cmd} ${allow_fail}=${FALSE} + ${kc}= Get From Dictionary ${C2CC_KUBECONFIGS} ${alias} + ${stdout_file}= Create Random Temp File + ${result}= Process.Run Process ${cmd} env:KUBECONFIG=${kc} + ... stderr=STDOUT shell=True + ... stdout=${stdout_file} + ... 
timeout=300s + Log ${result.stdout} + Log ${result.stderr} + IF not ${allow_fail} Should Be Equal As Integers ${result.rc} 0 + ${stdout}= OperatingSystem.Get File ${stdout_file} + ${stdout}= Strip String ${stdout} + RETURN ${stdout} + +Oc Exec On Cluster + [Documentation] Run 'oc exec' on a pod in the specified cluster. + [Arguments] ${alias} ${pod} ${cmd} ${ns}=${EMPTY} + ${ns_arg}= Set Variable If '${ns}' -n ${ns} ${EMPTY} + ${stdout}= Oc On Cluster ${alias} oc exec ${ns_arg} ${pod} -- ${cmd} + RETURN ${stdout} + +Oc Apply On Cluster + [Documentation] Run 'oc apply -f' on the specified cluster. + [Arguments] ${alias} ${file} ${ns}=${EMPTY} + ${ns_arg}= Set Variable If '${ns}' -n ${ns} ${EMPTY} + ${stdout}= Oc On Cluster ${alias} oc apply ${ns_arg} -f ${file} + RETURN ${stdout} + +Verify Routes In Table 200 + [Documentation] Check that routes for the given CIDRs exist in table 200. + [Arguments] ${alias} ${remote_pod_cidr} ${remote_svc_cidr} + ${stdout}= Command On Cluster ${alias} ip route show table 200 + Should Contain ${stdout} ${remote_pod_cidr} + Should Contain ${stdout} ${remote_svc_cidr} + +Verify IP Rules For Table 200 + [Documentation] Check that IP rules at priority 100 exist for the given CIDRs. + [Arguments] ${alias} ${remote_pod_cidr} ${remote_svc_cidr} + ${stdout}= Command On Cluster ${alias} ip rule show + Should Contain ${stdout} to ${remote_pod_cidr} lookup 200 + Should Contain ${stdout} to ${remote_svc_cidr} lookup 200 + +Verify Routes In Table 201 + [Documentation] Check that service routes exist in table 201 for the local service CIDR. + [Arguments] ${alias} ${local_svc_cidr} + ${stdout}= Command On Cluster ${alias} ip route show table 201 + Should Contain ${stdout} ${local_svc_cidr} + +Verify Service IP Rules + [Documentation] Check that IP rules at priority 99 exist for cross-cluster service routing. 
+ [Arguments] ${alias} ${remote_pod_cidr} ${remote_svc_cidr} ${local_svc_cidr} + ${stdout}= Command On Cluster ${alias} ip rule show + Should Contain ${stdout} from ${remote_pod_cidr} to ${local_svc_cidr} lookup 201 + Should Contain ${stdout} from ${remote_svc_cidr} to ${local_svc_cidr} lookup 201 + +Verify NFTables Bypass Rules + [Documentation] Check that nftables masquerade bypass rules exist for remote CIDRs. + [Arguments] ${alias} ${remote_pod_cidr} ${remote_svc_cidr} + ${stdout}= Command On Cluster ${alias} + ... nft list chain inet ovn-kubernetes ovn-kube-pod-subnet-masq + Should Contain ${stdout} c2cc-no-masq:${remote_pod_cidr} + Should Contain ${stdout} c2cc-no-masq:${remote_svc_cidr} + +Verify OVN Static Routes + [Documentation] Check that OVN NB static routes tagged with microshift-c2cc exist for remote CIDRs. + [Arguments] ${alias} ${remote_pod_cidr} ${remote_svc_cidr} + ${pod}= Oc On Cluster ${alias} + ... oc get pod -n openshift-ovn-kubernetes -l app=ovnkube-master -o jsonpath='{.items[0].metadata.name}' + ${stdout}= Oc On Cluster + ... ${alias} + ... oc exec -n openshift-ovn-kubernetes ${pod} -- ovn-nbctl find Logical_Router_Static_Route external_ids:k8s.ovn.org/owner-controller=microshift-c2cc + Should Contain ${stdout} ${remote_pod_cidr} + Should Contain ${stdout} ${remote_svc_cidr} + +Verify Node SNAT Annotation + [Documentation] Check that the node SNAT-exclude annotation contains the remote CIDRs. + [Arguments] ${alias} ${remote_pod_cidr} ${remote_svc_cidr} + ${stdout}= Oc On Cluster ${alias} + ... oc get node -o jsonpath='{.items[0].metadata.annotations.k8s\\.ovn\\.org/node-ingress-snat-exclude-subnets}' + Should Contain ${stdout} ${remote_pod_cidr} + Should Contain ${stdout} ${remote_svc_cidr} + +Verify C2CC Network Policy + [Documentation] Check that the C2CC network policy exists in the default namespace. + [Arguments] ${alias} + ${stdout}= Oc On Cluster + ... ${alias} + ... 
oc get networkpolicy c2cc-allow-remote-pods -n default -o jsonpath='{.metadata.labels.app\\.kubernetes\\.io/managed-by}' + Should Be Equal As Strings ${stdout} microshift-c2cc + +Verify C2CC Tracking Annotation + [Documentation] Check that the C2CC tracking annotation exists and contains the remote CIDRs. + [Arguments] ${alias} ${remote_pod_cidr} ${remote_svc_cidr} + ${stdout}= Oc On Cluster ${alias} + ... oc get node -o jsonpath='{.items[0].metadata.annotations.microshift\\.io/c2cc-snat-subnets}' + Should Contain ${stdout} ${remote_pod_cidr} + Should Contain ${stdout} ${remote_svc_cidr} + +Get Node SNAT Annotation + [Documentation] Return the raw value of the SNAT-exclude annotation. + [Arguments] ${alias} + ${stdout}= Oc On Cluster ${alias} + ... oc get node -o jsonpath='{.items[0].metadata.annotations.k8s\\.ovn\\.org/node-ingress-snat-exclude-subnets}' + RETURN ${stdout} + +Inject Foreign Subnet Into SNAT Annotation + [Documentation] Add a foreign (non-C2CC) subnet to the SNAT-exclude annotation. + ... Reads the current value, appends the foreign CIDR, and patches the node. + [Arguments] ${alias} ${foreign_cidr} + ${node}= Oc On Cluster ${alias} + ... oc get nodes -o jsonpath='{.items[0].metadata.name}' + ${current}= Get Node SNAT Annotation ${alias} + ${new_value}= Command On Cluster + ... ${alias} + ... echo '${current}' | python3 -c "import sys,json; d=json.load(sys.stdin); d.append('${foreign_cidr}'); print(json.dumps(sorted(d)))" + ... sudo_mode=False + Oc On Cluster ${alias} + ... oc annotate node ${node} k8s.ovn.org/node-ingress-snat-exclude-subnets='${new_value}' --overwrite + +Remove C2CC CIDRs From SNAT Annotation Keeping Foreign + [Documentation] Remove C2CC CIDRs from the SNAT annotation, keeping only the foreign subnet. + ... This simulates external modification that strips C2CC entries. + [Arguments] ${alias} ${foreign_cidr} + ${node}= Oc On Cluster ${alias} + ... oc get nodes -o jsonpath='{.items[0].metadata.name}' + Oc On Cluster ${alias} + ... 
oc annotate node ${node} k8s.ovn.org/node-ingress-snat-exclude-subnets='["${foreign_cidr}"]' --overwrite + +Remove Foreign Subnet From SNAT Annotation + [Documentation] Remove a foreign subnet from the SNAT annotation to restore clean state. + ... Triggers a reconcile by briefly corrupting the annotation. + [Arguments] ${alias} + Corrupt Node SNAT Annotation On Cluster ${alias} diff --git a/test/scenarios-bootc/el9/presubmits/el98-src@c2cc.sh b/test/scenarios-bootc/el9/presubmits/el98-src@c2cc.sh new file mode 100644 index 0000000000..a5486d688f --- /dev/null +++ b/test/scenarios-bootc/el9/presubmits/el98-src@c2cc.sh @@ -0,0 +1,120 @@ +#!/bin/bash + +# Sourced from scenario.sh and uses functions defined there. +export TEST_RANDOMIZATION=none + +# Cluster A (host1): default MicroShift CIDRs +CLUSTER_A_POD_CIDR="10.42.0.0/16" +CLUSTER_A_SVC_CIDR="10.43.0.0/16" +CLUSTER_A_DOMAIN="cluster-a.remote" + +# Cluster B (host2): non-overlapping CIDRs +CLUSTER_B_POD_CIDR="10.45.0.0/16" +CLUSTER_B_SVC_CIDR="10.46.0.0/16" +CLUSTER_B_DOMAIN="cluster-b.remote" + +wait_for_greenboot_on_hosts() { + local junit_label=$1 + local host + for host in host1 host2; do + local host_ip full_host + host_ip=$(get_vm_property "${host}" ip) + full_host=$(full_vm_name "${host}") + if ! 
wait_for_greenboot "${full_host}" "${host_ip}"; then + record_junit "${host}" "${junit_label}" "FAILED" + return 1 + fi + record_junit "${host}" "${junit_label}" "OK" + done +} + +configure_c2cc_host() { + local host=$1 remote_ip=$2 remote_pod_cidr=$3 remote_svc_cidr=$4 remote_domain=$5 + + run_command_on_vm "${host}" "sudo mkdir -p /etc/microshift/config.d" + run_command_on_vm "${host}" "sudo tee /etc/microshift/config.d/50-c2cc.yaml > /dev/null << EOF +clusterToCluster: + remoteClusters: + - nextHop: ${remote_ip} + clusterNetwork: + - ${remote_pod_cidr} + serviceNetwork: + - ${remote_svc_cidr} + domain: ${remote_domain} +EOF" + + configure_vm_firewall "${host}" + run_command_on_vm "${host}" "sudo firewall-cmd --permanent --zone=trusted --add-source=${remote_pod_cidr}" + run_command_on_vm "${host}" "sudo firewall-cmd --permanent --zone=trusted --add-source=${remote_svc_cidr}" + run_command_on_vm "${host}" "sudo firewall-cmd --reload" + + run_command_on_vm "${host}" "sudo systemctl restart microshift" +} + +configure_c2cc_hosts() { + local -r host1_ip=$(get_vm_property host1 ip) + local -r host2_ip=$(get_vm_property host2 ip) + + wait_for_greenboot_on_hosts "c2cc_pre_greenboot" + + configure_c2cc_host host1 "${host2_ip}" "${CLUSTER_B_POD_CIDR}" "${CLUSTER_B_SVC_CIDR}" "${CLUSTER_B_DOMAIN}" + configure_c2cc_host host2 "${host1_ip}" "${CLUSTER_A_POD_CIDR}" "${CLUSTER_A_SVC_CIDR}" "${CLUSTER_A_DOMAIN}" + + wait_for_greenboot_on_hosts "c2cc_greenboot" +} + +scenario_create_vms() { + prepare_kickstart host1 kickstart-bootc.ks.template rhel98-bootc-source + prepare_kickstart host2 kickstart-bootc.ks.template rhel98-bootc-source + + # Inject host2's non-default CIDRs into its kickstart config so MicroShift + # boots with the correct network from the start (no cleanup-data needed). 
+ local -r host2_ks_dir="${SCENARIO_INFO_DIR}/${SCENARIO}/vms/host2" + cat >> "${host2_ks_dir}/post-microshift.cfg" <>/etc/microshift/config.yaml < b.MaxElapsedTime { + return b.Stop + } + return next +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. 
+ return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go new file mode 100644 index 0000000000..b9c0c51cd7 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -0,0 +1,146 @@ +package backoff + +import ( + "errors" + "time" +) + +// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). +// The operation will be retried using a backoff policy if it returns an error. +type OperationWithData[T any] func() (T, error) + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +func (o Operation) withEmptyData() OperationWithData[struct{}] { + return func() (struct{}, error) { + return struct{}{}, o() + } +} + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) +} + +// RetryWithData is like Retry but returns data in the response too. +func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { + return RetryNotifyWithData(o, b, nil) +} + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. 
+func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithData is like RetryNotify but returns data in the response too. +func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { + return doRetryNotify(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { + _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) + return err +} + +// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. +func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + return doRetryNotify(operation, b, notify, t) +} + +func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + var ( + err error + next time.Duration + res T + ) + if t == nil { + t = &defaultTimer{} + } + + defer func() { + t.Stop() + }() + + ctx := getContext(b) + + b.Reset() + for { + res, err = operation() + if err == nil { + return res, nil + } + + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + if cerr := ctx.Err(); cerr != nil { + return res, cerr + } + + return res, err + } + + if notify != nil { + notify(err, next) + } + + t.Start(next) + + select { + case <-ctx.Done(): + return res, ctx.Err() + case <-t.C(): + } + } +} + +// PermanentError signals that the operation should not be retried. 
+type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +func (e *PermanentError) Unwrap() error { + return e.Err +} + +func (e *PermanentError) Is(target error) bool { + _, ok := target.(*PermanentError) + return ok +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go new file mode 100644 index 0000000000..df9d68bce5 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -0,0 +1,97 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. +// A default timer that uses system timer is used when nil is passed. 
+func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go new file mode 100644 index 0000000000..8120d0213c --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. 
+func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. +func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go new file mode 100644 index 0000000000..28d58ca37c --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -0,0 +1,38 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries == 0 { + return Stop + } + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/cenkalti/hub/.gitignore b/vendor/github.com/cenkalti/hub/.gitignore new file mode 100644 index 0000000000..00268614f0 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git 
a/vendor/github.com/cenkalti/hub/.travis.yml b/vendor/github.com/cenkalti/hub/.travis.yml new file mode 100644 index 0000000000..d8cecb0dfe --- /dev/null +++ b/vendor/github.com/cenkalti/hub/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: 1.13 +arch: + - amd64 + - ppc64le diff --git a/vendor/github.com/cenkalti/hub/LICENSE b/vendor/github.com/cenkalti/hub/LICENSE new file mode 100644 index 0000000000..89b8179965 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/cenkalti/hub/README.md b/vendor/github.com/cenkalti/hub/README.md new file mode 100644 index 0000000000..d3f2118183 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/README.md @@ -0,0 +1,5 @@ +hub +=== + +[![GoDoc](https://godoc.org/github.com/cenkalti/hub?status.png)](https://godoc.org/github.com/cenkalti/hub) +[![Build Status](https://travis-ci.org/cenkalti/hub.png)](https://travis-ci.org/cenkalti/hub) diff --git a/vendor/github.com/cenkalti/hub/hub.go b/vendor/github.com/cenkalti/hub/hub.go new file mode 100644 index 0000000000..24c5efa861 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/hub.go @@ -0,0 +1,82 @@ +// Package hub provides a simple event dispatcher for publish/subscribe pattern. +package hub + +import "sync" + +type Kind int + +// Event is an interface for published events. +type Event interface { + Kind() Kind +} + +// Hub is an event dispatcher, publishes events to the subscribers +// which are subscribed for a specific event type. +// Optimized for publish calls. +// The handlers may be called in order different than they are registered. +type Hub struct { + subscribers map[Kind][]handler + m sync.RWMutex + seq uint64 +} + +type handler struct { + f func(Event) + id uint64 +} + +// Subscribe registers f for the event of a specific kind. +func (h *Hub) Subscribe(kind Kind, f func(Event)) (cancel func()) { + var cancelled bool + h.m.Lock() + h.seq++ + id := h.seq + if h.subscribers == nil { + h.subscribers = make(map[Kind][]handler) + } + h.subscribers[kind] = append(h.subscribers[kind], handler{id: id, f: f}) + h.m.Unlock() + return func() { + h.m.Lock() + if cancelled { + h.m.Unlock() + return + } + cancelled = true + a := h.subscribers[kind] + for i, f := range a { + if f.id == id { + a[i], h.subscribers[kind] = a[len(a)-1], a[:len(a)-1] + break + } + } + if len(a) == 0 { + delete(h.subscribers, kind) + } + h.m.Unlock() + } +} + +// Publish an event to the subscribers. 
+func (h *Hub) Publish(e Event) { + h.m.RLock() + if handlers, ok := h.subscribers[e.Kind()]; ok { + for _, h := range handlers { + h.f(e) + } + } + h.m.RUnlock() +} + +// DefaultHub is the default Hub used by Publish and Subscribe. +var DefaultHub Hub + +// Subscribe registers f for the event of a specific kind in the DefaultHub. +func Subscribe(kind Kind, f func(Event)) (cancel func()) { + return DefaultHub.Subscribe(kind, f) +} + +// Publish an event to the subscribers in DefaultHub. +func Publish(e Event) { + DefaultHub.Publish(e) +} diff --git a/vendor/github.com/cenkalti/rpc2/.gitignore b/vendor/github.com/cenkalti/rpc2/.gitignore new file mode 100644 index 0000000000..836562412f --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/cenkalti/rpc2/.travis.yml b/vendor/github.com/cenkalti/rpc2/.travis.yml new file mode 100644 index 0000000000..ae8233c2bf --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.15 + - tip + +arch: + - amd64 + - ppc64le diff --git a/vendor/github.com/cenkalti/rpc2/LICENSE b/vendor/github.com/cenkalti/rpc2/LICENSE new file mode 100644 index 0000000000..d565b1b1fb --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to 
whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/cenkalti/rpc2/README.md b/vendor/github.com/cenkalti/rpc2/README.md new file mode 100644 index 0000000000..da7ffdc417 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/README.md @@ -0,0 +1,81 @@ +rpc2 +==== + +[![GoDoc](https://godoc.org/github.com/cenkalti/rpc2?status.png)](https://godoc.org/github.com/cenkalti/rpc2) + +rpc2 is a fork of net/rpc package in the standard library. +The main goal is to add bi-directional support to calls. +That means server can call the methods of client. +This is not possible with net/rpc package. +In order to do this it adds a `*Client` argument to method signatures. 
+ +Install +-------- + + go get github.com/cenkalti/rpc2 + +Example server +--------------- + +```go +package main + +import ( + "fmt" + "net" + + "github.com/cenkalti/rpc2" +) + +type Args struct{ A, B int } +type Reply int + +func main() { + srv := rpc2.NewServer() + srv.Handle("add", func(client *rpc2.Client, args *Args, reply *Reply) error { + + // Reversed call (server to client) + var rep Reply + client.Call("mult", Args{2, 3}, &rep) + fmt.Println("mult result:", rep) + + *reply = Reply(args.A + args.B) + return nil + }) + + lis, _ := net.Listen("tcp", "127.0.0.1:5000") + srv.Accept(lis) +} +``` + +Example Client +--------------- + +```go +package main + +import ( + "fmt" + "net" + + "github.com/cenkalti/rpc2" +) + +type Args struct{ A, B int } +type Reply int + +func main() { + conn, _ := net.Dial("tcp", "127.0.0.1:5000") + + clt := rpc2.NewClient(conn) + clt.Handle("mult", func(client *rpc2.Client, args *Args, reply *Reply) error { + *reply = Reply(args.A * args.B) + return nil + }) + go clt.Run() + + var rep Reply + clt.Call("add", Args{1, 2}, &rep) + fmt.Println("add result:", rep) +} +``` diff --git a/vendor/github.com/cenkalti/rpc2/client.go b/vendor/github.com/cenkalti/rpc2/client.go new file mode 100644 index 0000000000..cc9956976f --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/client.go @@ -0,0 +1,364 @@ +// Package rpc2 provides bi-directional RPC client and server similar to net/rpc. +package rpc2 + +import ( + "context" + "errors" + "io" + "log" + "reflect" + "sync" +) + +// Client represents an RPC Client. +// There may be multiple outstanding Calls associated +// with a single Client, and a Client may be used by +// multiple goroutines simultaneously. 
+type Client struct { + mutex sync.Mutex // protects pending, seq, request + sending sync.Mutex + request Request // temp area used in send() + seq uint64 + pending map[uint64]*Call + closing bool + shutdown bool + server bool + codec Codec + handlers map[string]*handler + disconnect chan struct{} + State *State // additional information to associate with client + blocking bool // whether to block request handling +} + +// NewClient returns a new Client to handle requests to the +// set of services at the other end of the connection. +// It adds a buffer to the write side of the connection so +// the header and payload are sent as a unit. +func NewClient(conn io.ReadWriteCloser) *Client { + return NewClientWithCodec(NewGobCodec(conn)) +} + +// NewClientWithCodec is like NewClient but uses the specified +// codec to encode requests and decode responses. +func NewClientWithCodec(codec Codec) *Client { + return &Client{ + codec: codec, + pending: make(map[uint64]*Call), + handlers: make(map[string]*handler), + disconnect: make(chan struct{}), + seq: 1, // 0 means notification. + } +} + +// SetBlocking puts the client in blocking mode. +// In blocking mode, received requests are processes synchronously. +// If you have methods that may take a long time, other subsequent requests may time out. +func (c *Client) SetBlocking(blocking bool) { + c.blocking = blocking +} + +// Run the client's read loop. +// You must run this method before calling any methods on the server. +func (c *Client) Run() { + c.readLoop() +} + +// DisconnectNotify returns a channel that is closed +// when the client connection has gone away. +func (c *Client) DisconnectNotify() chan struct{} { + return c.disconnect +} + +// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics. +func (c *Client) Handle(method string, handlerFunc interface{}) { + addHandler(c.handlers, method, handlerFunc) +} + +// readLoop reads messages from codec. 
+// It reads a reqeust or a response to the previous request. +// If the message is request, calls the handler function. +// If the message is response, sends the reply to the associated call. +func (c *Client) readLoop() { + var err error + var req Request + var resp Response + for err == nil { + req = Request{} + resp = Response{} + if err = c.codec.ReadHeader(&req, &resp); err != nil { + break + } + + if req.Method != "" { + // request comes to server + if err = c.readRequest(&req); err != nil { + debugln("rpc2: error reading request:", err.Error()) + } + } else { + // response comes to client + if err = c.readResponse(&resp); err != nil { + debugln("rpc2: error reading response:", err.Error()) + } + } + } + // Terminate pending calls. + c.sending.Lock() + c.mutex.Lock() + c.shutdown = true + closing := c.closing + if err == io.EOF { + if closing { + err = ErrShutdown + } else { + err = io.ErrUnexpectedEOF + } + } + for _, call := range c.pending { + call.Error = err + call.done() + } + c.mutex.Unlock() + c.sending.Unlock() + if err != io.EOF && !closing && !c.server { + debugln("rpc2: client protocol error:", err) + } + close(c.disconnect) + if !closing { + c.codec.Close() + } +} + +func (c *Client) handleRequest(req Request, method *handler, argv reflect.Value) { + // Invoke the method, providing a new value for the reply. + replyv := reflect.New(method.replyType.Elem()) + + returnValues := method.fn.Call([]reflect.Value{reflect.ValueOf(c), argv, replyv}) + + // Do not send response if request is a notification. + if req.Seq == 0 { + return + } + + // The return value for the method is an error. 
+ errInter := returnValues[0].Interface() + errmsg := "" + if errInter != nil { + errmsg = errInter.(error).Error() + } + resp := &Response{ + Seq: req.Seq, + Error: errmsg, + } + if err := c.codec.WriteResponse(resp, replyv.Interface()); err != nil { + debugln("rpc2: error writing response:", err.Error()) + } +} + +func (c *Client) readRequest(req *Request) error { + method, ok := c.handlers[req.Method] + if !ok { + resp := &Response{ + Seq: req.Seq, + Error: "rpc2: can't find method " + req.Method, + } + return c.codec.WriteResponse(resp, resp) + } + + // Decode the argument value. + var argv reflect.Value + argIsValue := false // if true, need to indirect before calling. + if method.argType.Kind() == reflect.Ptr { + argv = reflect.New(method.argType.Elem()) + } else { + argv = reflect.New(method.argType) + argIsValue = true + } + // argv guaranteed to be a pointer now. + if err := c.codec.ReadRequestBody(argv.Interface()); err != nil { + return err + } + if argIsValue { + argv = argv.Elem() + } + + if c.blocking { + c.handleRequest(*req, method, argv) + } else { + go c.handleRequest(*req, method, argv) + } + + return nil +} + +func (c *Client) readResponse(resp *Response) error { + seq := resp.Seq + c.mutex.Lock() + call := c.pending[seq] + delete(c.pending, seq) + c.mutex.Unlock() + + var err error + switch { + case call == nil: + // We've got no pending call. That usually means that + // WriteRequest partially failed, and call was already + // removed; response is a server telling us about an + // error reading request body. We should still attempt + // to read error body, but there's no one to give it to. + err = c.codec.ReadResponseBody(nil) + if err != nil { + err = errors.New("reading error body: " + err.Error()) + } + case resp.Error != "": + // We've got an error response. Give this to the request; + // any subsequent requests will get the ReadResponseBody + // error if there is one. 
+ call.Error = ServerError(resp.Error) + err = c.codec.ReadResponseBody(nil) + if err != nil { + err = errors.New("reading error body: " + err.Error()) + } + call.done() + default: + err = c.codec.ReadResponseBody(call.Reply) + if err != nil { + call.Error = errors.New("reading body " + err.Error()) + } + call.done() + } + + return err +} + +// Close waits for active calls to finish and closes the codec. +func (c *Client) Close() error { + c.mutex.Lock() + if c.shutdown || c.closing { + c.mutex.Unlock() + return ErrShutdown + } + c.closing = true + c.mutex.Unlock() + return c.codec.Close() +} + +// Go invokes the function asynchronously. It returns the Call structure representing +// the invocation. The done channel will signal when the call is complete by returning +// the same Call object. If done is nil, Go will allocate a new channel. +// If non-nil, done must be buffered or Go will deliberately crash. +func (c *Client) Go(method string, args interface{}, reply interface{}, done chan *Call) *Call { + call := new(Call) + call.Method = method + call.Args = args + call.Reply = reply + if done == nil { + done = make(chan *Call, 10) // buffered. + } else { + // If caller passes done != nil, it must arrange that + // done has enough buffer for the number of simultaneous + // RPCs that will be using that channel. If the channel + // is totally unbuffered, it's best not to run at all. + if cap(done) == 0 { + log.Panic("rpc2: done channel is unbuffered") + } + } + call.Done = done + c.send(call) + return call +} + +// CallWithContext invokes the named function, waits for it to complete, and +// returns its error status, or an error from Context timeout. 
+func (c *Client) CallWithContext(ctx context.Context, method string, args interface{}, reply interface{}) error { + call := c.Go(method, args, reply, make(chan *Call, 1)) + select { + case <-call.Done: + return call.Error + case <-ctx.Done(): + return ctx.Err() + } + return nil +} + +// Call invokes the named function, waits for it to complete, and returns its error status. +func (c *Client) Call(method string, args interface{}, reply interface{}) error { + return c.CallWithContext(context.Background(), method, args, reply) +} + +func (call *Call) done() { + select { + case call.Done <- call: + // ok + default: + // We don't want to block here. It is the caller's responsibility to make + // sure the channel has enough buffer space. See comment in Go(). + debugln("rpc2: discarding Call reply due to insufficient Done chan capacity") + } +} + +// ServerError represents an error that has been returned from +// the remote side of the RPC connection. +type ServerError string + +func (e ServerError) Error() string { + return string(e) +} + +// ErrShutdown is returned when the connection is closing or closed. +var ErrShutdown = errors.New("connection is shut down") + +// Call represents an active RPC. +type Call struct { + Method string // The name of the service and method to call. + Args interface{} // The argument to the function (*struct). + Reply interface{} // The reply from the function (*struct). + Error error // After completion, the error status. + Done chan *Call // Strobes when call is complete. +} + +func (c *Client) send(call *Call) { + c.sending.Lock() + defer c.sending.Unlock() + + // Register this call. + c.mutex.Lock() + if c.shutdown || c.closing { + call.Error = ErrShutdown + c.mutex.Unlock() + call.done() + return + } + seq := c.seq + c.seq++ + c.pending[seq] = call + c.mutex.Unlock() + + // Encode and send the request. 
+ c.request.Seq = seq + c.request.Method = call.Method + err := c.codec.WriteRequest(&c.request, call.Args) + if err != nil { + c.mutex.Lock() + call = c.pending[seq] + delete(c.pending, seq) + c.mutex.Unlock() + if call != nil { + call.Error = err + call.done() + } + } +} + +// Notify sends a request to the receiver but does not wait for a return value. +func (c *Client) Notify(method string, args interface{}) error { + c.sending.Lock() + defer c.sending.Unlock() + + if c.shutdown || c.closing { + return ErrShutdown + } + + c.request.Seq = 0 + c.request.Method = method + return c.codec.WriteRequest(&c.request, args) +} diff --git a/vendor/github.com/cenkalti/rpc2/codec.go b/vendor/github.com/cenkalti/rpc2/codec.go new file mode 100644 index 0000000000..b097d9aaa6 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/codec.go @@ -0,0 +1,125 @@ +package rpc2 + +import ( + "bufio" + "encoding/gob" + "io" + "sync" +) + +// A Codec implements reading and writing of RPC requests and responses. +// The client calls ReadHeader to read a message header. +// The implementation must populate either Request or Response argument. +// Depending on which argument is populated, ReadRequestBody or +// ReadResponseBody is called right after ReadHeader. +// ReadRequestBody and ReadResponseBody may be called with a nil +// argument to force the body to be read and then discarded. +type Codec interface { + // ReadHeader must read a message and populate either the request + // or the response by inspecting the incoming message. + ReadHeader(*Request, *Response) error + + // ReadRequestBody into args argument of handler function. + ReadRequestBody(interface{}) error + + // ReadResponseBody into reply argument of handler function. + ReadResponseBody(interface{}) error + + // WriteRequest must be safe for concurrent use by multiple goroutines. + WriteRequest(*Request, interface{}) error + + // WriteResponse must be safe for concurrent use by multiple goroutines. 
+ WriteResponse(*Response, interface{}) error + + // Close is called when client/server finished with the connection. + Close() error +} + +// Request is a header written before every RPC call. +type Request struct { + Seq uint64 // sequence number chosen by client + Method string +} + +// Response is a header written before every RPC return. +type Response struct { + Seq uint64 // echoes that of the request + Error string // error, if any. +} + +type gobCodec struct { + rwc io.ReadWriteCloser + dec *gob.Decoder + enc *gob.Encoder + encBuf *bufio.Writer + mutex sync.Mutex +} + +type message struct { + Seq uint64 + Method string + Error string +} + +// NewGobCodec returns a new rpc2.Codec using gob encoding/decoding on conn. +func NewGobCodec(conn io.ReadWriteCloser) Codec { + buf := bufio.NewWriter(conn) + return &gobCodec{ + rwc: conn, + dec: gob.NewDecoder(conn), + enc: gob.NewEncoder(buf), + encBuf: buf, + } +} + +func (c *gobCodec) ReadHeader(req *Request, resp *Response) error { + var msg message + if err := c.dec.Decode(&msg); err != nil { + return err + } + + if msg.Method != "" { + req.Seq = msg.Seq + req.Method = msg.Method + } else { + resp.Seq = msg.Seq + resp.Error = msg.Error + } + return nil +} + +func (c *gobCodec) ReadRequestBody(body interface{}) error { + return c.dec.Decode(body) +} + +func (c *gobCodec) ReadResponseBody(body interface{}) error { + return c.dec.Decode(body) +} + +func (c *gobCodec) WriteRequest(r *Request, body interface{}) (err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if err = c.enc.Encode(r); err != nil { + return + } + if err = c.enc.Encode(body); err != nil { + return + } + return c.encBuf.Flush() +} + +func (c *gobCodec) WriteResponse(r *Response, body interface{}) (err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if err = c.enc.Encode(r); err != nil { + return + } + if err = c.enc.Encode(body); err != nil { + return + } + return c.encBuf.Flush() +} + +func (c *gobCodec) Close() error { + return 
c.rwc.Close() +} diff --git a/vendor/github.com/cenkalti/rpc2/debug.go b/vendor/github.com/cenkalti/rpc2/debug.go new file mode 100644 index 0000000000..ec1b625218 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/debug.go @@ -0,0 +1,12 @@ +package rpc2 + +import "log" + +// DebugLog controls the printing of internal and I/O errors. +var DebugLog = false + +func debugln(v ...interface{}) { + if DebugLog { + log.Println(v...) + } +} diff --git a/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go new file mode 100644 index 0000000000..46fb41a518 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go @@ -0,0 +1,233 @@ +// Package jsonrpc implements a JSON-RPC ClientCodec and ServerCodec for the rpc2 package. +// +// Beside struct types, JSONCodec allows using positional arguments. +// Use []interface{} as the type of argument when sending and receiving methods. +// +// Positional arguments example: +// server.Handle("add", func(client *rpc2.Client, args []interface{}, result *float64) error { +// *result = args[0].(float64) + args[1].(float64) +// return nil +// }) +// +// var result float64 +// client.Call("add", []interface{}{1, 2}, &result) +// +package jsonrpc + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "sync" + + "github.com/cenkalti/rpc2" +) + +type jsonCodec struct { + dec *json.Decoder // for reading JSON values + enc *json.Encoder // for writing JSON values + c io.Closer + + // temporary work space + msg message + serverRequest serverRequest + clientResponse clientResponse + + // JSON-RPC clients can use arbitrary json values as request IDs. + // Package rpc expects uint64 request IDs. + // We assign uint64 sequence numbers to incoming requests + // but save the original request ID in the pending map. + // When rpc responds, we use the sequence number in + // the response to find the original request ID. 
+ mutex sync.Mutex // protects seq, pending + pending map[uint64]*json.RawMessage + seq uint64 +} + +// NewJSONCodec returns a new rpc2.Codec using JSON-RPC on conn. +func NewJSONCodec(conn io.ReadWriteCloser) rpc2.Codec { + return &jsonCodec{ + dec: json.NewDecoder(conn), + enc: json.NewEncoder(conn), + c: conn, + pending: make(map[uint64]*json.RawMessage), + } +} + +// serverRequest and clientResponse combined +type message struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params"` + Id *json.RawMessage `json:"id"` + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` +} + +// Unmarshal to +type serverRequest struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params"` + Id *json.RawMessage `json:"id"` +} +type clientResponse struct { + Id uint64 `json:"id"` + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` +} + +// to Marshal +type serverResponse struct { + Id *json.RawMessage `json:"id"` + Result interface{} `json:"result"` + Error interface{} `json:"error"` +} +type clientRequest struct { + Method string `json:"method"` + Params interface{} `json:"params"` + Id *uint64 `json:"id"` +} + +func (c *jsonCodec) ReadHeader(req *rpc2.Request, resp *rpc2.Response) error { + c.msg = message{} + if err := c.dec.Decode(&c.msg); err != nil { + return err + } + + if c.msg.Method != "" { + // request comes to server + c.serverRequest.Id = c.msg.Id + c.serverRequest.Method = c.msg.Method + c.serverRequest.Params = c.msg.Params + + req.Method = c.serverRequest.Method + + // JSON request id can be any JSON value; + // RPC package expects uint64. Translate to + // internal uint64 and save JSON on the side. 
+ if c.serverRequest.Id == nil { + // Notification + } else { + c.mutex.Lock() + c.seq++ + c.pending[c.seq] = c.serverRequest.Id + c.serverRequest.Id = nil + req.Seq = c.seq + c.mutex.Unlock() + } + } else { + // response comes to client + err := json.Unmarshal(*c.msg.Id, &c.clientResponse.Id) + if err != nil { + return err + } + c.clientResponse.Result = c.msg.Result + c.clientResponse.Error = c.msg.Error + + resp.Error = "" + resp.Seq = c.clientResponse.Id + if c.clientResponse.Error != nil || c.clientResponse.Result == nil { + x, ok := c.clientResponse.Error.(string) + if !ok { + // According to the JSON-RPC spec, the error field can be an object. + // Marshal non-string errors to JSON to handle structured error responses. + // https://www.jsonrpc.org/specification_v1 + errBytes, err := json.Marshal(c.clientResponse.Error) + if err != nil { + return fmt.Errorf("error marshaling error object: %v", err) + } + x = string(errBytes) + } + if x == "" { + x = "unspecified error" + } + resp.Error = x + } + } + return nil +} + +var errMissingParams = errors.New("jsonrpc: request body missing params") + +func (c *jsonCodec) ReadRequestBody(x interface{}) error { + if x == nil { + return nil + } + if c.serverRequest.Params == nil { + return errMissingParams + } + + var err error + + // Check if x points to a slice of any kind + rt := reflect.TypeOf(x) + if rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Slice { + // If it's a slice, unmarshal as is + err = json.Unmarshal(*c.serverRequest.Params, x) + } else { + // Anything else unmarshal into a slice containing x + params := &[]interface{}{x} + err = json.Unmarshal(*c.serverRequest.Params, params) + } + + return err +} + +func (c *jsonCodec) ReadResponseBody(x interface{}) error { + if x == nil { + return nil + } + return json.Unmarshal(*c.clientResponse.Result, x) +} + +func (c *jsonCodec) WriteRequest(r *rpc2.Request, param interface{}) error { + req := &clientRequest{Method: r.Method} + + // Check if param is a 
slice of any kind + if param != nil && reflect.TypeOf(param).Kind() == reflect.Slice { + // If it's a slice, leave as is + req.Params = param + } else { + // Put anything else into a slice + req.Params = []interface{}{param} + } + + if r.Seq == 0 { + // Notification + req.Id = nil + } else { + seq := r.Seq + req.Id = &seq + } + return c.enc.Encode(req) +} + +var null = json.RawMessage([]byte("null")) + +func (c *jsonCodec) WriteResponse(r *rpc2.Response, x interface{}) error { + c.mutex.Lock() + b, ok := c.pending[r.Seq] + if !ok { + c.mutex.Unlock() + return errors.New("invalid sequence number in response") + } + delete(c.pending, r.Seq) + c.mutex.Unlock() + + if b == nil { + // Invalid request so no id. Use JSON null. + b = &null + } + resp := serverResponse{Id: b} + if r.Error == "" { + resp.Result = x + } else { + resp.Error = r.Error + } + return c.enc.Encode(resp) +} + +func (c *jsonCodec) Close() error { + return c.c.Close() +} diff --git a/vendor/github.com/cenkalti/rpc2/server.go b/vendor/github.com/cenkalti/rpc2/server.go new file mode 100644 index 0000000000..df4cdc832d --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/server.go @@ -0,0 +1,184 @@ +package rpc2 + +import ( + "errors" + "io" + "log" + "net" + "reflect" + "unicode" + "unicode/utf8" + + "github.com/cenkalti/hub" +) + +// Precompute the reflect type for error. Can't use error directly +// because Typeof takes an empty interface value. This is annoying. +var typeOfError = reflect.TypeOf((*error)(nil)).Elem() +var typeOfClient = reflect.TypeOf((*Client)(nil)) + +const ( + clientConnected hub.Kind = iota + clientDisconnected +) + +// Server responds to RPC requests made by Client. 
+type Server struct { + handlers map[string]*handler + eventHub *hub.Hub +} + +type handler struct { + fn reflect.Value + argType reflect.Type + replyType reflect.Type +} + +type connectionEvent struct { + Client *Client +} + +type disconnectionEvent struct { + Client *Client +} + +func (connectionEvent) Kind() hub.Kind { return clientConnected } +func (disconnectionEvent) Kind() hub.Kind { return clientDisconnected } + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + handlers: make(map[string]*handler), + eventHub: &hub.Hub{}, + } +} + +// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics. +func (s *Server) Handle(method string, handlerFunc interface{}) { + addHandler(s.handlers, method, handlerFunc) +} + +func addHandler(handlers map[string]*handler, mname string, handlerFunc interface{}) { + if _, ok := handlers[mname]; ok { + panic("rpc2: multiple registrations for " + mname) + } + + method := reflect.ValueOf(handlerFunc) + mtype := method.Type() + // Method needs three ins: *client, *args, *reply. + if mtype.NumIn() != 3 { + log.Panicln("method", mname, "has wrong number of ins:", mtype.NumIn()) + } + // First arg must be a pointer to rpc2.Client. + clientType := mtype.In(0) + if clientType.Kind() != reflect.Ptr { + log.Panicln("method", mname, "client type not a pointer:", clientType) + } + if clientType != typeOfClient { + log.Panicln("method", mname, "first argument", clientType.String(), "not *rpc2.Client") + } + // Second arg need not be a pointer. + argType := mtype.In(1) + if !isExportedOrBuiltinType(argType) { + log.Panicln(mname, "argument type not exported:", argType) + } + // Third arg must be a pointer. + replyType := mtype.In(2) + if replyType.Kind() != reflect.Ptr { + log.Panicln("method", mname, "reply type not a pointer:", replyType) + } + // Reply type must be exported. 
+ if !isExportedOrBuiltinType(replyType) { + log.Panicln("method", mname, "reply type not exported:", replyType) + } + // Method needs one out. + if mtype.NumOut() != 1 { + log.Panicln("method", mname, "has wrong number of outs:", mtype.NumOut()) + } + // The return type of the method must be error. + if returnType := mtype.Out(0); returnType != typeOfError { + log.Panicln("method", mname, "returns", returnType.String(), "not error") + } + handlers[mname] = &handler{ + fn: method, + argType: argType, + replyType: replyType, + } +} + +// Is this type exported or a builtin? +func isExportedOrBuiltinType(t reflect.Type) bool { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + // PkgPath will be non-empty even for an exported type, + // so we need to check the type name as well. + return isExported(t.Name()) || t.PkgPath() == "" +} + +// Is this an exported - upper case - name? +func isExported(name string) bool { + rune, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(rune) +} + +// OnConnect registers a function to run when a client connects. +func (s *Server) OnConnect(f func(*Client)) { + s.eventHub.Subscribe(clientConnected, func(e hub.Event) { + go f(e.(connectionEvent).Client) + }) +} + +// OnDisconnect registers a function to run when a client disconnects. +func (s *Server) OnDisconnect(f func(*Client)) { + s.eventHub.Subscribe(clientDisconnected, func(e hub.Event) { + go f(e.(disconnectionEvent).Client) + }) +} + +// Accept accepts connections on the listener and serves requests +// for each incoming connection. Accept blocks; the caller typically +// invokes it in a go statement. +func (s *Server) Accept(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + if !errors.Is(err, net.ErrClosed) { + log.Print("rpc.Serve: accept:", err.Error()) + } + return + } + go s.ServeConn(conn) + } +} + +// ServeConn runs the server on a single connection. +// ServeConn blocks, serving the connection until the client hangs up. 
+// The caller typically invokes ServeConn in a go statement. +// ServeConn uses the gob wire format (see package gob) on the +// connection. To use an alternate codec, use ServeCodec. +func (s *Server) ServeConn(conn io.ReadWriteCloser) { + s.ServeCodec(NewGobCodec(conn)) +} + +// ServeCodec is like ServeConn but uses the specified codec to +// decode requests and encode responses. +func (s *Server) ServeCodec(codec Codec) { + s.ServeCodecWithState(codec, NewState()) +} + +// ServeCodecWithState is like ServeCodec but also gives the ability to +// associate a state variable with the client that persists across RPC calls. +func (s *Server) ServeCodecWithState(codec Codec, state *State) { + defer codec.Close() + + // Client also handles the incoming connections. + c := NewClientWithCodec(codec) + c.server = true + c.handlers = s.handlers + c.State = state + + s.eventHub.Publish(connectionEvent{c}) + c.Run() + s.eventHub.Publish(disconnectionEvent{c}) +} diff --git a/vendor/github.com/cenkalti/rpc2/state.go b/vendor/github.com/cenkalti/rpc2/state.go new file mode 100644 index 0000000000..7a4f23e6d9 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/state.go @@ -0,0 +1,25 @@ +package rpc2 + +import "sync" + +type State struct { + store map[string]interface{} + m sync.RWMutex +} + +func NewState() *State { + return &State{store: make(map[string]interface{})} +} + +func (s *State) Get(key string) (value interface{}, ok bool) { + s.m.RLock() + value, ok = s.store[key] + s.m.RUnlock() + return +} + +func (s *State) Set(key string, value interface{}) { + s.m.Lock() + s.store[key] = value + s.m.Unlock() +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/.gitattributes b/vendor/github.com/gabriel-vasile/mimetype/.gitattributes new file mode 100644 index 0000000000..0cc26ec01c --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/.gitattributes @@ -0,0 +1 @@ +testdata/* linguist-vendored diff --git a/vendor/github.com/gabriel-vasile/mimetype/.golangci.yml 
b/vendor/github.com/gabriel-vasile/mimetype/.golangci.yml new file mode 100644 index 0000000000..f2058ccc57 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/.golangci.yml @@ -0,0 +1,5 @@ +version: "2" +linters: + exclusions: + presets: + - std-error-handling diff --git a/vendor/github.com/gabriel-vasile/mimetype/LICENSE b/vendor/github.com/gabriel-vasile/mimetype/LICENSE new file mode 100644 index 0000000000..13b61daa59 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Gabriel Vasile + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/gabriel-vasile/mimetype/README.md b/vendor/github.com/gabriel-vasile/mimetype/README.md new file mode 100644 index 0000000000..f28f56c9bf --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/README.md @@ -0,0 +1,103 @@ +

+ mimetype +

+ +

+ A package for detecting MIME types and extensions based on magic numbers +

+
+ Goroutine safe, extensible, no C bindings +
+ +

+ + Go Reference + + + Go report card + + + License + +

+ +## Features +- fast and precise MIME type and file extension detection +- long list of [supported MIME types](supported_mimes.md) +- possibility to [extend](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#example-package-Extend) with other file formats +- common file formats are prioritized +- [text vs. binary files differentiation](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#example-package-TextVsBinary) +- no external dependencies +- safe for concurrent usage + +## Install +```bash +go get github.com/gabriel-vasile/mimetype +``` + +## Usage +```go +mtype := mimetype.Detect([]byte) +// OR +mtype, err := mimetype.DetectReader(io.Reader) +// OR +mtype, err := mimetype.DetectFile("/path/to/file") +fmt.Println(mtype.String(), mtype.Extension()) +``` +See the [runnable Go Playground examples](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#pkg-overview). + +Caution: only use libraries like **mimetype** as a last resort. Content type detection +using magic numbers is slow, inaccurate, and non-standard. Most of the times +protocols have methods for specifying such metadata; e.g., `Content-Type` header +in HTTP and SMTP. + +## FAQ +Q: My file is in the list of [supported MIME types](supported_mimes.md) but +it is not correctly detected. What should I do? + +A: Some file formats (often Microsoft Office documents) keep their signatures +towards the end of the file. Try increasing the number of bytes used for detection +with: +```go +mimetype.SetLimit(1024*1024) // Set limit to 1MB. +// or +mimetype.SetLimit(0) // No limit, whole file content used. +mimetype.DetectFile("file.doc") +``` +If increasing the limit does not help, please +[open an issue](https://github.com/gabriel-vasile/mimetype/issues/new?assignees=&labels=&template=mismatched-mime-type-detected.md&title=). 
+ +## Tests +In addition to unit tests, +[mimetype_tests](https://github.com/gabriel-vasile/mimetype_tests) compares the +library with the [Unix file utility](https://en.wikipedia.org/wiki/File_(command)) +for around 50 000 sample files. Check the latest comparison results +[here](https://github.com/gabriel-vasile/mimetype_tests/actions). + +## Benchmarks +Benchmarks for each file format are performed when a PR is open. The results can +be seen on the [workflows page](https://github.com/gabriel-vasile/mimetype/actions/workflows/benchmark.yml). +Performance improvements are welcome but correctness is prioritized. + +## Structure +**mimetype** uses a hierarchical structure to keep the MIME type detection logic. +This reduces the number of calls needed for detecting the file type. The reason +behind this choice is that there are file formats used as containers for other +file formats. For example, Microsoft Office files are just zip archives, +containing specific metadata files. Once a file has been identified as a +zip, there is no need to check if it is a text file, but it is worth checking if +it is an Microsoft Office file. + +To prevent loading entire files into memory, when detecting from a +[reader](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#DetectReader) +or from a [file](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#DetectFile) +**mimetype** limits itself to reading only the header of the input. +
+ how project is structured +
+ +## Contributing +Contributions are unexpected but welcome. When submitting a PR for detection of +a new file format, please make sure to add a record to the list of testcases +from [mimetype_test.go](mimetype_test.go). For complex files a record can be added +in the [testdata](testdata) directory. diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go b/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go new file mode 100644 index 0000000000..8c5a05e4d5 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go @@ -0,0 +1,283 @@ +package charset + +import ( + "bytes" + "unicode/utf8" + + "github.com/gabriel-vasile/mimetype/internal/markup" + "github.com/gabriel-vasile/mimetype/internal/scan" +) + +const ( + F = 0 /* character never appears in text */ + T = 1 /* character appears in plain ASCII text */ + I = 2 /* character appears in ISO-8859 text */ + X = 3 /* character appears in non-ISO extended ASCII (Mac, IBM PC) */ +) + +var ( + boms = []struct { + bom []byte + enc string + }{ + {[]byte{0xEF, 0xBB, 0xBF}, "utf-8"}, + {[]byte{0x00, 0x00, 0xFE, 0xFF}, "utf-32be"}, + {[]byte{0xFF, 0xFE, 0x00, 0x00}, "utf-32le"}, + {[]byte{0xFE, 0xFF}, "utf-16be"}, + {[]byte{0xFF, 0xFE}, "utf-16le"}, + } + + // https://github.com/file/file/blob/fa93fb9f7d21935f1c7644c47d2975d31f12b812/src/encoding.c#L241 + textChars = [256]byte{ + /* BEL BS HT LF VT FF CR */ + F, F, F, F, F, F, F, T, T, T, T, T, T, T, F, F, /* 0x0X */ + /* ESC */ + F, F, F, F, F, F, F, F, F, F, F, T, F, F, F, F, /* 0x1X */ + T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x2X */ + T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x3X */ + T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x4X */ + T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x5X */ + T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x6X */ + T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, F, /* 0x7X */ + /* NEL */ + X, X, X, X, X, T, X, X, X, X, X, X, X, 
X, X, X, /* 0x8X */ + X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, /* 0x9X */ + I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xaX */ + I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xbX */ + I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xcX */ + I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xdX */ + I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xeX */ + I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xfX */ + } +) + +// FromBOM returns the charset declared in the BOM of content. +func FromBOM(content []byte) string { + for _, b := range boms { + if bytes.HasPrefix(content, b.bom) { + return b.enc + } + } + return "" +} + +// FromPlain returns the charset of a plain text. It relies on BOM presence +// and it falls back on checking each byte in content. +func FromPlain(content []byte) string { + if len(content) == 0 { + return "" + } + if cset := FromBOM(content); cset != "" { + return cset + } + origContent := content + // Try to detect UTF-8. + // First eliminate any partial rune at the end. + for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- { + b := content[i] + if b < 0x80 { + break + } + if utf8.RuneStart(b) { + content = content[:i] + break + } + } + hasHighBit := false + for _, c := range content { + if c >= 0x80 { + hasHighBit = true + break + } + } + if hasHighBit && utf8.Valid(content) { + return "utf-8" + } + + // ASCII is a subset of UTF8. Follow W3C recommendation and replace with UTF8. + if ascii(origContent) { + return "utf-8" + } + + return latin(origContent) +} + +func latin(content []byte) string { + hasControlBytes := false + for _, b := range content { + t := textChars[b] + if t != T && t != I { + return "" + } + if b >= 0x80 && b <= 0x9F { + hasControlBytes = true + } + } + // Code range 0x80 to 0x9F is reserved for control characters in ISO-8859-1 + // (so-called C1 Controls). Windows 1252, however, has printable punctuation + // characters in this range. 
+ if hasControlBytes { + return "windows-1252" + } + return "iso-8859-1" +} + +func ascii(content []byte) bool { + for _, b := range content { + if textChars[b] != T { + return false + } + } + return true +} + +// FromXML returns the charset of an XML document. It relies on the XML +// header and falls back on the plain +// text content. +func FromXML(content []byte) string { + if cset := fromXML(content); cset != "" { + return cset + } + return FromPlain(content) +} +func fromXML(s scan.Bytes) string { + xml := []byte(" and falls back on the +// plain text content. +func FromHTML(content []byte) string { + if cset := FromBOM(content); cset != "" { + return cset + } + if cset := fromHTML(content); cset != "" { + return cset + } + return FromPlain(content) +} + +func fromHTML(s scan.Bytes) string { + const ( + dontKnow = iota + doNeedPragma + doNotNeedPragma + ) + meta := []byte(" 0 && line[n-1] == '\r' { + return line[:n-1], false // drop \r at end of line + } + + // This line is problematic. The logic from CountFields comes from + // encoding/csv.Reader which relies on mutating the input bytes. + // https://github.com/golang/go/blob/b3251514531123d7fd007682389bce7428d159a0/src/encoding/csv/reader.go#L275-L279 + // To avoid mutating the input, we return cutShort. #680 + if n >= 2 && line[n-2] == '\r' && line[n-1] == '\n' { + return line[:n-2], true + } + return line, false +} + +// CountFields reads one CSV line and counts how many records that line contained. +// hasMore reports whether there are more lines in the input. +// collectIndexes makes CountFields return a list of indexes where CSV fields +// start in the line. These indexes are used to test the correctness against the +// encoding/csv parser. 
+func (r *Parser) CountFields(collectIndexes bool) (fields int, fieldPos []int, hasMore bool) { + finished := false + var line scan.Bytes + cutShort := false + for { + line, cutShort = r.readLine() + if finished { + return 0, nil, false + } + finished = len(r.s) == 0 && len(line) == 0 + if len(line) == lengthNL(line) { + line = nil + continue // Skip empty lines. + } + if len(line) > 0 && line[0] == r.comment { + line = nil + continue + } + break + } + + indexes := []int{} + originalLine := line +parseField: + for { + if len(line) == 0 || line[0] != '"' { // non-quoted string field + fields++ + if collectIndexes { + indexes = append(indexes, len(originalLine)-len(line)) + } + i := bytes.IndexByte(line, r.comma) + if i >= 0 { + line.Advance(i + 1) // 1 to get over ending comma + continue parseField + } + break parseField + } else { // Quoted string field. + if collectIndexes { + indexes = append(indexes, len(originalLine)-len(line)) + } + line.Advance(1) // get over starting quote + for { + i := bytes.IndexByte(line, '"') + if i >= 0 { + line.Advance(i + 1) // 1 for ending quote + switch rn := line.Peek(); { + case rn == '"': + line.Advance(1) + case rn == r.comma: + line.Advance(1) + fields++ + continue parseField + case lengthNL(line) == len(line): + fields++ + break parseField + } + } else if len(line) > 0 || cutShort { + line, cutShort = r.readLine() + originalLine = line + } else { + fields++ + break parseField + } + } + } + } + + return fields, indexes, fields != 0 +} + +// lengthNL reports the number of bytes for the trailing \n. 
+func lengthNL(b []byte) int { + if len(b) > 0 && b[len(b)-1] == '\n' { + return 1 + } + return 0 +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/json/parser.go b/vendor/github.com/gabriel-vasile/mimetype/internal/json/parser.go new file mode 100644 index 0000000000..4bc861743c --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/json/parser.go @@ -0,0 +1,478 @@ +package json + +import ( + "bytes" + "sync" +) + +const ( + QueryNone = "json" + QueryGeo = "geo" + QueryHAR = "har" + QueryGLTF = "gltf" + maxRecursion = 4096 +) + +var queries = map[string][]query{ + QueryNone: nil, + QueryGeo: {{ + SearchPath: [][]byte{[]byte("type")}, + SearchVals: [][]byte{ + []byte(`"Feature"`), + []byte(`"FeatureCollection"`), + []byte(`"Point"`), + []byte(`"LineString"`), + []byte(`"Polygon"`), + []byte(`"MultiPoint"`), + []byte(`"MultiLineString"`), + []byte(`"MultiPolygon"`), + []byte(`"GeometryCollection"`), + }, + }}, + QueryHAR: {{ + SearchPath: [][]byte{[]byte("log"), []byte("version")}, + }, { + SearchPath: [][]byte{[]byte("log"), []byte("creator")}, + }, { + SearchPath: [][]byte{[]byte("log"), []byte("entries")}, + }}, + QueryGLTF: {{ + SearchPath: [][]byte{[]byte("asset"), []byte("version")}, + SearchVals: [][]byte{[]byte(`"1.0"`), []byte(`"2.0"`)}, + }}, +} + +var parserPool = sync.Pool{ + New: func() any { + return &parserState{maxRecursion: maxRecursion} + }, +} + +// parserState holds the state of JSON parsing. The number of inspected bytes, +// the current path inside the JSON object, etc. +type parserState struct { + // ib represents the number of inspected bytes. + // Because mimetype limits itself to only reading the header of the file, + // it means sometimes the input JSON can be truncated. In that case, we want + // to still detect it as JSON, even if it's invalid/truncated. + // When ib == len(input) it means the JSON was valid (at least the header). 
+ ib int + maxRecursion int + // currPath keeps a track of the JSON keys parsed up. + // It works only for JSON objects. JSON arrays are ignored + // mainly because the functionality is not needed. + currPath [][]byte + // firstToken stores the first JSON token encountered in input. + // TODO: performance would be better if we would stop parsing as soon + // as we see that first token is not what we are interested in. + firstToken int + // querySatisfied is true if both path and value of any queries passed to + // consumeAny are satisfied. + querySatisfied bool +} + +// query holds information about a combination of {"key": "val"} that we're trying +// to search for inside the JSON. +type query struct { + // SearchPath represents the whole path to look for inside the JSON. + // ex: [][]byte{[]byte("foo"), []byte("bar")} matches {"foo": {"bar": "baz"}} + SearchPath [][]byte + // SearchVals represents values to look for when the SearchPath is found. + // Each SearchVal element is tried until one of them matches (logical OR.) + SearchVals [][]byte +} + +func eq(path1, path2 [][]byte) bool { + if len(path1) != len(path2) { + return false + } + for i := range path1 { + if !bytes.Equal(path1[i], path2[i]) { + return false + } + } + return true +} + +// LooksLikeObjectOrArray reports if first non white space character from raw +// is either { or [. Parsing raw as JSON is a heavy operation. When receiving some +// text input we can skip parsing if the input does not even look like JSON. +func LooksLikeObjectOrArray(raw []byte) bool { + for i := range raw { + if isSpace(raw[i]) { + continue + } + return raw[i] == '{' || raw[i] == '[' + } + + return false +} + +// Parse will take out a parser from the pool depending on queryType and tries +// to parse raw bytes as JSON. 
+func Parse(queryType string, raw []byte) (parsed, inspected, firstToken int, querySatisfied bool) { + p := parserPool.Get().(*parserState) + defer func() { + // Avoid hanging on to too much memory in extreme input cases. + if len(p.currPath) > 128 { + p.currPath = nil + } + parserPool.Put(p) + }() + p.reset() + + qs := queries[queryType] + got := p.consumeAny(raw, qs, 0) + return got, p.ib, p.firstToken, p.querySatisfied +} + +func (p *parserState) reset() { + p.ib = 0 + p.currPath = p.currPath[0:0] + p.firstToken = TokInvalid + p.querySatisfied = false +} + +func (p *parserState) consumeSpace(b []byte) (n int) { + for len(b) > 0 && isSpace(b[0]) { + b = b[1:] + n++ + p.ib++ + } + return n +} + +func (p *parserState) consumeConst(b, cnst []byte) int { + lb := len(b) + for i, c := range cnst { + if lb > i && b[i] == c { + p.ib++ + } else { + return 0 + } + } + return len(cnst) +} + +func (p *parserState) consumeString(b []byte) (n int) { + var c byte + for len(b[n:]) > 0 { + c, n = b[n], n+1 + p.ib++ + switch c { + case '\\': + if len(b[n:]) == 0 { + return 0 + } + switch b[n] { + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + n++ + p.ib++ + continue + case 'u': + n++ + p.ib++ + for j := 0; j < 4 && len(b[n:]) > 0; j++ { + if !isXDigit(b[n]) { + return 0 + } + n++ + p.ib++ + } + continue + default: + return 0 + } + case '"': + return n + default: + continue + } + } + return 0 +} + +func (p *parserState) consumeNumber(b []byte) (n int) { + got := false + var i int + + if len(b) == 0 { + goto out + } + if b[0] == '-' { + b, i = b[1:], i+1 + p.ib++ + } + + for len(b) > 0 { + if !isDigit(b[0]) { + break + } + got = true + b, i = b[1:], i+1 + p.ib++ + } + if len(b) == 0 { + goto out + } + if b[0] == '.' 
{ + b, i = b[1:], i+1 + p.ib++ + } + for len(b) > 0 { + if !isDigit(b[0]) { + break + } + got = true + b, i = b[1:], i+1 + p.ib++ + } + if len(b) == 0 { + goto out + } + if got && (b[0] == 'e' || b[0] == 'E') { + b, i = b[1:], i+1 + p.ib++ + got = false + if len(b) == 0 { + goto out + } + if b[0] == '+' || b[0] == '-' { + b, i = b[1:], i+1 + p.ib++ + } + for len(b) > 0 { + if !isDigit(b[0]) { + break + } + got = true + b, i = b[1:], i+1 + p.ib++ + } + } +out: + if got { + return i + } + return 0 +} + +func (p *parserState) consumeArray(b []byte, qs []query, lvl int) (n int) { + p.appendPath([]byte{'['}, qs) + if len(b) == 0 { + return 0 + } + + for n < len(b) { + n += p.consumeSpace(b[n:]) + if len(b[n:]) == 0 { + return 0 + } + if b[n] == ']' { + p.ib++ + p.popLastPath(qs) + return n + 1 + } + innerParsed := p.consumeAny(b[n:], qs, lvl) + if innerParsed == 0 { + return 0 + } + n += innerParsed + if len(b[n:]) == 0 { + return 0 + } + switch b[n] { + case ',': + n += 1 + p.ib++ + continue + case ']': + p.ib++ + return n + 1 + default: + return 0 + } + } + return 0 +} + +func queryPathMatch(qs []query, path [][]byte) int { + for i := range qs { + if eq(qs[i].SearchPath, path) { + return i + } + } + return -1 +} + +// appendPath will append a path fragment if queries is not empty. +// If we don't need query functionality (just checking if a JSON is valid), +// then we can skip keeping track of the path we're currently in. 
+func (p *parserState) appendPath(path []byte, qs []query) { + if len(qs) != 0 { + p.currPath = append(p.currPath, path) + } +} +func (p *parserState) popLastPath(qs []query) { + if len(qs) != 0 { + p.currPath = p.currPath[:len(p.currPath)-1] + } +} + +func (p *parserState) consumeObject(b []byte, qs []query, lvl int) (n int) { + for n < len(b) { + n += p.consumeSpace(b[n:]) + if len(b[n:]) == 0 { + return 0 + } + if b[n] == '}' { + p.ib++ + return n + 1 + } + if b[n] != '"' { + return 0 + } else { + n += 1 + p.ib++ + } + // queryMatched stores the index of the query satisfying the current path. + queryMatched := -1 + if keyLen := p.consumeString(b[n:]); keyLen == 0 { + return 0 + } else { + p.appendPath(b[n:n+keyLen-1], qs) + if !p.querySatisfied { + queryMatched = queryPathMatch(qs, p.currPath) + } + n += keyLen + } + n += p.consumeSpace(b[n:]) + if len(b[n:]) == 0 { + return 0 + } + if b[n] != ':' { + return 0 + } else { + n += 1 + p.ib++ + } + n += p.consumeSpace(b[n:]) + if len(b[n:]) == 0 { + return 0 + } + + if valLen := p.consumeAny(b[n:], qs, lvl); valLen == 0 { + return 0 + } else { + if queryMatched != -1 { + q := qs[queryMatched] + if len(q.SearchVals) == 0 { + p.querySatisfied = true + } + for _, val := range q.SearchVals { + if bytes.Equal(val, bytes.TrimSpace(b[n:n+valLen])) { + p.querySatisfied = true + } + } + } + n += valLen + } + if len(b[n:]) == 0 { + return 0 + } + switch b[n] { + case ',': + p.popLastPath(qs) + n++ + p.ib++ + continue + case '}': + p.popLastPath(qs) + p.ib++ + return n + 1 + default: + return 0 + } + } + return 0 +} + +func (p *parserState) consumeAny(b []byte, qs []query, lvl int) (n int) { + // Avoid too much recursion. 
+ if p.maxRecursion != 0 && lvl > p.maxRecursion { + return 0 + } + if len(qs) == 0 { + p.querySatisfied = true + } + n += p.consumeSpace(b) + if len(b[n:]) == 0 { + return 0 + } + + var t, rv int + switch b[n] { + case '"': + n++ + p.ib++ + rv = p.consumeString(b[n:]) + t = TokString + case '[': + n++ + p.ib++ + rv = p.consumeArray(b[n:], qs, lvl+1) + t = TokArray + case '{': + n++ + p.ib++ + rv = p.consumeObject(b[n:], qs, lvl+1) + t = TokObject + case 't': + rv = p.consumeConst(b[n:], []byte("true")) + t = TokTrue + case 'f': + rv = p.consumeConst(b[n:], []byte("false")) + t = TokFalse + case 'n': + rv = p.consumeConst(b[n:], []byte("null")) + t = TokNull + default: + rv = p.consumeNumber(b[n:]) + t = TokNumber + } + if lvl == 0 { + p.firstToken = t + } + if rv <= 0 { + return n + } + n += rv + n += p.consumeSpace(b[n:]) + return n +} + +func isSpace(c byte) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' +} +func isDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +func isXDigit(c byte) bool { + if isDigit(c) { + return true + } + return ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F') +} + +const ( + TokInvalid = 0 + TokNull = 1 << iota + TokTrue + TokFalse + TokNumber + TokString + TokArray + TokObject + TokComma +) diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go new file mode 100644 index 0000000000..dd7f2417c6 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go @@ -0,0 +1,163 @@ +package magic + +import ( + "bytes" + "encoding/binary" +) + +var ( + // SevenZ matches a 7z archive. + SevenZ = prefix([]byte{0x37, 0x7A, 0xBC, 0xAF, 0x27, 0x1C}) + // Gzip matches gzip files based on http://www.zlib.org/rfc-gzip.html#header-trailer. + Gzip = prefix([]byte{0x1f, 0x8b}) + // Fits matches an Flexible Image Transport System file. 
+ Fits = prefix([]byte{ + 0x53, 0x49, 0x4D, 0x50, 0x4C, 0x45, 0x20, 0x20, 0x3D, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x54, + }) + // Xar matches an eXtensible ARchive format file. + Xar = prefix([]byte{0x78, 0x61, 0x72, 0x21}) + // Bz2 matches a bzip2 file. + Bz2 = prefix([]byte{0x42, 0x5A, 0x68}) + // Ar matches an ar (Unix) archive file. + Ar = prefix([]byte{0x21, 0x3C, 0x61, 0x72, 0x63, 0x68, 0x3E}) + // Deb matches a Debian package file. + Deb = offset([]byte{ + 0x64, 0x65, 0x62, 0x69, 0x61, 0x6E, 0x2D, + 0x62, 0x69, 0x6E, 0x61, 0x72, 0x79, + }, 8) + // Warc matches a Web ARChive file. + Warc = prefix([]byte("WARC/1.0"), []byte("WARC/1.1")) + // Cab matches a Microsoft Cabinet archive file. + Cab = prefix([]byte("MSCF\x00\x00\x00\x00")) + // Xz matches an xz compressed stream based on https://tukaani.org/xz/xz-file-format.txt. + Xz = prefix([]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}) + // Lzip matches an Lzip compressed file. + Lzip = prefix([]byte{0x4c, 0x5a, 0x49, 0x50}) + // RPM matches an RPM or Delta RPM package file. + RPM = prefix([]byte{0xed, 0xab, 0xee, 0xdb}, []byte("drpm")) + // Cpio matches a cpio archive file. + Cpio = prefix([]byte("070707"), []byte("070701"), []byte("070702")) + // RAR matches a RAR archive file. + RAR = prefix([]byte("Rar!\x1A\x07\x00"), []byte("Rar!\x1A\x07\x01\x00")) +) + +// InstallShieldCab matches an InstallShield Cabinet archive file. +func InstallShieldCab(raw []byte, _ uint32) bool { + return len(raw) > 7 && + bytes.Equal(raw[0:4], []byte("ISc(")) && + raw[6] == 0 && + (raw[7] == 1 || raw[7] == 2 || raw[7] == 4) +} + +// Zstd matches a Zstandard archive file. +// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md +func Zstd(raw []byte, limit uint32) bool { + if len(raw) < 4 { + return false + } + sig := binary.LittleEndian.Uint32(raw) + // Check for Zstandard frames and skippable frames. 
+ return (sig >= 0xFD2FB522 && sig <= 0xFD2FB528) || + (sig >= 0x184D2A50 && sig <= 0x184D2A5F) +} + +// CRX matches a Chrome extension file: a zip archive prepended by a package header. +func CRX(raw []byte, limit uint32) bool { + const minHeaderLen = 16 + if len(raw) < minHeaderLen || !bytes.HasPrefix(raw, []byte("Cr24")) { + return false + } + pubkeyLen := binary.LittleEndian.Uint32(raw[8:12]) + sigLen := binary.LittleEndian.Uint32(raw[12:16]) + zipOffset := minHeaderLen + pubkeyLen + sigLen + if uint32(len(raw)) < zipOffset { + return false + } + return Zip(raw[zipOffset:], limit) +} + +// Tar matches a (t)ape (ar)chive file. +// Tar files are divided into 512 bytes records. First record contains a 257 +// bytes header padded with NUL. +func Tar(raw []byte, _ uint32) bool { + const sizeRecord = 512 + + // The structure of a tar header: + // type TarHeader struct { + // Name [100]byte + // Mode [8]byte + // Uid [8]byte + // Gid [8]byte + // Size [12]byte + // Mtime [12]byte + // Chksum [8]byte + // Linkflag byte + // Linkname [100]byte + // Magic [8]byte + // Uname [32]byte + // Gname [32]byte + // Devmajor [8]byte + // Devminor [8]byte + // } + + if len(raw) < sizeRecord { + return false + } + raw = raw[:sizeRecord] + + // First 100 bytes of the header represent the file name. + // Check if file looks like Gentoo GLEP binary package. + if bytes.Contains(raw[:100], []byte("/gpkg-1\x00")) { + return false + } + + // Get the checksum recorded into the file. + recsum := tarParseOctal(raw[148:156]) + if recsum == -1 { + return false + } + sum1, sum2 := tarChksum(raw) + return recsum == sum1 || recsum == sum2 +} + +// tarParseOctal converts octal string to decimal int. +func tarParseOctal(b []byte) int64 { + // Because unused fields are filled with NULs, we need to skip leading NULs. + // Fields may also be padded with spaces or NULs. + // So we remove leading and trailing NULs and spaces to be sure. 
+ b = bytes.Trim(b, " \x00") + + if len(b) == 0 { + return -1 + } + ret := int64(0) + for _, b := range b { + if b == 0 { + break + } + if b < '0' || b > '7' { + return -1 + } + ret = (ret << 3) | int64(b-'0') + } + return ret +} + +// tarChksum computes the checksum for the header block b. +// The actual checksum is written to same b block after it has been calculated. +// Before calculation the bytes from b reserved for checksum have placeholder +// value of ASCII space 0x20. +// POSIX specifies a sum of the unsigned byte values, but the Sun tar used +// signed byte values. We compute and return both. +func tarChksum(b []byte) (unsigned, signed int64) { + for i, c := range b { + if 148 <= i && i < 156 { + c = ' ' // Treat the checksum field itself as all spaces. + } + unsigned += int64(c) + signed += int64(int8(c)) + } + return unsigned, signed +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go new file mode 100644 index 0000000000..d17e32482c --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go @@ -0,0 +1,76 @@ +package magic + +import ( + "bytes" + "encoding/binary" +) + +var ( + // Flac matches a Free Lossless Audio Codec file. + Flac = prefix([]byte("\x66\x4C\x61\x43\x00\x00\x00\x22")) + // Midi matches a Musical Instrument Digital Interface file. + Midi = prefix([]byte("\x4D\x54\x68\x64")) + // Ape matches a Monkey's Audio file. + Ape = prefix([]byte("\x4D\x41\x43\x20\x96\x0F\x00\x00\x34\x00\x00\x00\x18\x00\x00\x00\x90\xE3")) + // MusePack matches a Musepack file. + MusePack = prefix([]byte("MPCK")) + // Au matches a Sun Microsystems au file. + Au = prefix([]byte("\x2E\x73\x6E\x64")) + // Amr matches an Adaptive Multi-Rate file. + Amr = prefix([]byte("\x23\x21\x41\x4D\x52")) + // Voc matches a Creative Voice file. + Voc = prefix([]byte("Creative Voice File")) + // M3u matches a Playlist file. 
+ M3u = prefix([]byte("#EXTM3U")) + // AAC matches an Advanced Audio Coding file. + AAC = prefix([]byte{0xFF, 0xF1}, []byte{0xFF, 0xF9}) +) + +// Mp3 matches an mp3 file. +func Mp3(raw []byte, limit uint32) bool { + if len(raw) < 3 { + return false + } + + if bytes.HasPrefix(raw, []byte("ID3")) { + // MP3s with an ID3v2 tag will start with "ID3" + // ID3v1 tags, however appear at the end of the file. + return true + } + + // Match MP3 files without tags + switch binary.BigEndian.Uint16(raw[:2]) & 0xFFFE { + case 0xFFFA: + // MPEG ADTS, layer III, v1 + return true + case 0xFFF2: + // MPEG ADTS, layer III, v2 + return true + case 0xFFE2: + // MPEG ADTS, layer III, v2.5 + return true + } + + return false +} + +// Wav matches a Waveform Audio File Format file. +func Wav(raw []byte, limit uint32) bool { + return len(raw) > 12 && + bytes.Equal(raw[:4], []byte("RIFF")) && + bytes.Equal(raw[8:12], []byte{0x57, 0x41, 0x56, 0x45}) +} + +// Aiff matches Audio Interchange File Format file. +func Aiff(raw []byte, limit uint32) bool { + return len(raw) > 12 && + bytes.Equal(raw[:4], []byte{0x46, 0x4F, 0x52, 0x4D}) && + bytes.Equal(raw[8:12], []byte{0x41, 0x49, 0x46, 0x46}) +} + +// Qcp matches a Qualcomm Pure Voice file. +func Qcp(raw []byte, limit uint32) bool { + return len(raw) > 12 && + bytes.Equal(raw[:4], []byte("RIFF")) && + bytes.Equal(raw[8:12], []byte("QLCM")) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go new file mode 100644 index 0000000000..70599b3420 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go @@ -0,0 +1,203 @@ +package magic + +import ( + "bytes" + "debug/macho" + "encoding/binary" +) + +var ( + // Lnk matches Microsoft lnk binary format. + Lnk = prefix([]byte{0x4C, 0x00, 0x00, 0x00, 0x01, 0x14, 0x02, 0x00}) + // Wasm matches a web assembly File Format file. 
+ Wasm = prefix([]byte{0x00, 0x61, 0x73, 0x6D}) + // Exe matches a Windows/DOS executable file. + Exe = prefix([]byte{0x4D, 0x5A}) + // Elf matches an Executable and Linkable Format file. + Elf = prefix([]byte{0x7F, 0x45, 0x4C, 0x46}) + // Nes matches a Nintendo Entertainment system ROM file. + Nes = prefix([]byte{0x4E, 0x45, 0x53, 0x1A}) + // SWF matches an Adobe Flash swf file. + SWF = prefix([]byte("CWS"), []byte("FWS"), []byte("ZWS")) + // Torrent has bencoded text in the beginning. + Torrent = prefix([]byte("d8:announce")) + // PAR1 matches a parquet file. + Par1 = prefix([]byte{0x50, 0x41, 0x52, 0x31}) + // CBOR matches a Concise Binary Object Representation https://cbor.io/ + CBOR = prefix([]byte{0xD9, 0xD9, 0xF7}) +) + +// Java bytecode and Mach-O binaries share the same magic number. +// More info here https://github.com/threatstack/libmagic/blob/master/magic/Magdir/cafebabe +func classOrMachOFat(in []byte) bool { + // There should be at least 8 bytes for both of them because the only way to + // quickly distinguish them is by comparing byte at position 7 + if len(in) < 8 { + return false + } + + return binary.BigEndian.Uint32(in) == macho.MagicFat +} + +// Class matches a java class file. +func Class(raw []byte, limit uint32) bool { + return classOrMachOFat(raw) && raw[7] > 30 +} + +// MachO matches Mach-O binaries format. +func MachO(raw []byte, limit uint32) bool { + if classOrMachOFat(raw) && raw[7] < 0x14 { + return true + } + + if len(raw) < 4 { + return false + } + + be := binary.BigEndian.Uint32(raw) + le := binary.LittleEndian.Uint32(raw) + + return be == macho.Magic32 || + le == macho.Magic32 || + be == macho.Magic64 || + le == macho.Magic64 +} + +// Dbf matches a dBase file. +// https://www.dbase.com/Knowledgebase/INT/db7_file_fmt.htm +func Dbf(raw []byte, limit uint32) bool { + if len(raw) < 68 { + return false + } + + // 3rd and 4th bytes contain the last update month and day of month. 
+ if raw[2] == 0 || raw[2] > 12 || raw[3] == 0 || raw[3] > 31 { + return false + } + + // 12, 13, 30, 31 are reserved bytes and always filled with 0x00. + if raw[12] != 0x00 || raw[13] != 0x00 || raw[30] != 0x00 || raw[31] != 0x00 { + return false + } + // Production MDX flag; + // 0x01 if a production .MDX file exists for this table; + // 0x00 if no .MDX file exists. + if raw[28] > 0x01 { + return false + } + + // dbf type is dictated by the first byte. + dbfTypes := []byte{ + 0x02, 0x03, 0x04, 0x05, 0x30, 0x31, 0x32, 0x42, 0x62, 0x7B, 0x82, + 0x83, 0x87, 0x8A, 0x8B, 0x8E, 0xB3, 0xCB, 0xE5, 0xF5, 0xF4, 0xFB, + } + for _, b := range dbfTypes { + if raw[0] == b { + return true + } + } + + return false +} + +// ElfObj matches an object file. +func ElfObj(raw []byte, limit uint32) bool { + return len(raw) > 17 && ((raw[16] == 0x01 && raw[17] == 0x00) || + (raw[16] == 0x00 && raw[17] == 0x01)) +} + +// ElfExe matches an executable file. +func ElfExe(raw []byte, limit uint32) bool { + return len(raw) > 17 && ((raw[16] == 0x02 && raw[17] == 0x00) || + (raw[16] == 0x00 && raw[17] == 0x02)) +} + +// ElfLib matches a shared library file. +func ElfLib(raw []byte, limit uint32) bool { + return len(raw) > 17 && ((raw[16] == 0x03 && raw[17] == 0x00) || + (raw[16] == 0x00 && raw[17] == 0x03)) +} + +// ElfDump matches a core dump file. +func ElfDump(raw []byte, limit uint32) bool { + return len(raw) > 17 && ((raw[16] == 0x04 && raw[17] == 0x00) || + (raw[16] == 0x00 && raw[17] == 0x04)) +} + +// Dcm matches a DICOM medical format file. +func Dcm(raw []byte, limit uint32) bool { + return len(raw) > 131 && + bytes.Equal(raw[128:132], []byte{0x44, 0x49, 0x43, 0x4D}) +} + +// Marc matches a MARC21 (MAchine-Readable Cataloging) file. +func Marc(raw []byte, limit uint32) bool { + // File is at least 24 bytes ("leader" field size). + if len(raw) < 24 { + return false + } + + // Fixed bytes at offset 20. 
+ if !bytes.Equal(raw[20:24], []byte("4500")) { + return false + } + + // First 5 bytes are ASCII digits. + for i := 0; i < 5; i++ { + if raw[i] < '0' || raw[i] > '9' { + return false + } + } + + // Field terminator is present in first 2048 bytes. + return bytes.Contains(raw[:min(2048, len(raw))], []byte{0x1E}) +} + +// GLB matches a glTF model format file. +// GLB is the binary file format representation of 3D models saved in +// the GL transmission Format (glTF). +// GLB uses little endian and its header structure is as follows: +// +// <-- 12-byte header --> +// | magic | version | length | +// | (uint32) | (uint32) | (uint32) | +// | \x67\x6C\x54\x46 | \x01\x00\x00\x00 | ... | +// | g l T F | 1 | ... | +// +// Visit [glTF specification] and [IANA glTF entry] for more details. +// +// [glTF specification]: https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html +// [IANA glTF entry]: https://www.iana.org/assignments/media-types/model/gltf-binary +var GLB = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"), + []byte("\x67\x6C\x54\x46\x01\x00\x00\x00")) + +// TzIf matches a Time Zone Information Format (TZif) file. +// See more: https://tools.ietf.org/id/draft-murchison-tzdist-tzif-00.html#rfc.section.3 +// Its header structure is shown below: +// +// +---------------+---+ +// | magic (4) | <-+-- version (1) +// +---------------+---+---------------------------------------+ +// | [unused - reserved for future use] (15) | +// +---------------+---------------+---------------+-----------+ +// | isutccnt (4) | isstdcnt (4) | leapcnt (4) | +// +---------------+---------------+---------------+ +// | timecnt (4) | typecnt (4) | charcnt (4) | +func TzIf(raw []byte, limit uint32) bool { + // File is at least 44 bytes (header size). + if len(raw) < 44 { + return false + } + + if !bytes.HasPrefix(raw, []byte("TZif")) { + return false + } + + // Field "typecnt" MUST not be zero. 
+ if binary.BigEndian.Uint32(raw[36:40]) == 0 { + return false + } + + // Version has to be NUL (0x00), '2' (0x32) or '3' (0x33). + return raw[4] == 0x00 || raw[4] == 0x32 || raw[4] == 0x33 +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go new file mode 100644 index 0000000000..cb1fed12f7 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go @@ -0,0 +1,13 @@ +package magic + +var ( + // Sqlite matches an SQLite database file. + Sqlite = prefix([]byte{ + 0x53, 0x51, 0x4c, 0x69, 0x74, 0x65, 0x20, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x20, 0x33, 0x00, + }) + // MsAccessAce matches Microsoft Access dababase file. + MsAccessAce = offset([]byte("Standard ACE DB"), 4) + // MsAccessMdb matches legacy Microsoft Access database file (JET, 2003 and earlier). + MsAccessMdb = offset([]byte("Standard Jet DB"), 4) +) diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go new file mode 100644 index 0000000000..7f9308db3b --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go @@ -0,0 +1,83 @@ +package magic + +import ( + "bytes" + "encoding/binary" +) + +var ( + // Fdf matches a Forms Data Format file. + Fdf = prefix([]byte("%FDF")) + // Mobi matches a Mobi file. + Mobi = offset([]byte("BOOKMOBI"), 60) + // Lit matches a Microsoft Lit file. + Lit = prefix([]byte("ITOLITLS")) +) + +// PDF matches a Portable Document Format file. +// The %PDF- header should be the first thing inside the file but many +// implementations don't follow the rule. The PDF spec at Appendix H says the +// signature can be prepended by anything. +// https://bugs.astron.com/view.php?id=446 +func PDF(raw []byte, _ uint32) bool { + raw = raw[:min(len(raw), 1024)] + return bytes.Contains(raw, []byte("%PDF-")) +} + +// DjVu matches a DjVu file. 
+func DjVu(raw []byte, _ uint32) bool { + if len(raw) < 12 { + return false + } + if !bytes.HasPrefix(raw, []byte{0x41, 0x54, 0x26, 0x54, 0x46, 0x4F, 0x52, 0x4D}) { + return false + } + return bytes.HasPrefix(raw[12:], []byte("DJVM")) || + bytes.HasPrefix(raw[12:], []byte("DJVU")) || + bytes.HasPrefix(raw[12:], []byte("DJVI")) || + bytes.HasPrefix(raw[12:], []byte("THUM")) +} + +// P7s matches an .p7s signature File (PEM, Base64). +func P7s(raw []byte, _ uint32) bool { + // Check for PEM Encoding. + if bytes.HasPrefix(raw, []byte("-----BEGIN PKCS7")) { + return true + } + // Check if DER Encoding is long enough. + if len(raw) < 20 { + return false + } + // Magic Bytes for the signedData ASN.1 encoding. + startHeader := [][]byte{{0x30, 0x80}, {0x30, 0x81}, {0x30, 0x82}, {0x30, 0x83}, {0x30, 0x84}} + signedDataMatch := []byte{0x06, 0x09, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07} + // Check if Header is correct. There are multiple valid headers. + for i, match := range startHeader { + // If first bytes match, then check for ASN.1 Object Type. + if bytes.HasPrefix(raw, match) { + if bytes.HasPrefix(raw[i+2:], signedDataMatch) { + return true + } + } + } + + return false +} + +// Lotus123 matches a Lotus 1-2-3 spreadsheet document. +func Lotus123(raw []byte, _ uint32) bool { + if len(raw) <= 20 { + return false + } + version := binary.BigEndian.Uint32(raw) + if version == 0x00000200 { + return raw[6] != 0 && raw[7] == 0 + } + + return version == 0x00001a00 && raw[20] > 0 && raw[20] < 32 +} + +// CHM matches a Microsoft Compiled HTML Help file. 
+func CHM(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("ITSF\003\000\000\000\x60\000\000\000")) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go new file mode 100644 index 0000000000..43af28212e --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go @@ -0,0 +1,39 @@ +package magic + +import ( + "bytes" +) + +var ( + // Woff matches a Web Open Font Format file. + Woff = prefix([]byte("wOFF")) + // Woff2 matches a Web Open Font Format version 2 file. + Woff2 = prefix([]byte("wOF2")) + // Otf matches an OpenType font file. + Otf = prefix([]byte{0x4F, 0x54, 0x54, 0x4F, 0x00}) +) + +// Ttf matches a TrueType font file. +func Ttf(raw []byte, limit uint32) bool { + if !bytes.HasPrefix(raw, []byte{0x00, 0x01, 0x00, 0x00}) { + return false + } + return !MsAccessAce(raw, limit) && !MsAccessMdb(raw, limit) +} + +// Eot matches an Embedded OpenType font file. +func Eot(raw []byte, limit uint32) bool { + return len(raw) > 35 && + bytes.Equal(raw[34:36], []byte{0x4C, 0x50}) && + (bytes.Equal(raw[8:11], []byte{0x02, 0x00, 0x01}) || + bytes.Equal(raw[8:11], []byte{0x01, 0x00, 0x00}) || + bytes.Equal(raw[8:11], []byte{0x02, 0x00, 0x02})) +} + +// Ttc matches a TrueType Collection font file. +func Ttc(raw []byte, limit uint32) bool { + return len(raw) > 7 && + bytes.HasPrefix(raw, []byte("ttcf")) && + (bytes.Equal(raw[4:8], []byte{0x00, 0x01, 0x00, 0x00}) || + bytes.Equal(raw[4:8], []byte{0x00, 0x02, 0x00, 0x00})) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go new file mode 100644 index 0000000000..ac727139ef --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go @@ -0,0 +1,109 @@ +package magic + +import ( + "bytes" +) + +var ( + // AVIF matches an AV1 Image File Format still or animated. 
+ // Wikipedia page seems outdated listing image/avif-sequence for animations. + // https://github.com/AOMediaCodec/av1-avif/issues/59 + AVIF = ftyp([]byte("avif"), []byte("avis")) + // ThreeGP matches a 3GPP file. + ThreeGP = ftyp( + []byte("3gp1"), []byte("3gp2"), []byte("3gp3"), []byte("3gp4"), + []byte("3gp5"), []byte("3gp6"), []byte("3gp7"), []byte("3gs7"), + []byte("3ge6"), []byte("3ge7"), []byte("3gg6"), + ) + // ThreeG2 matches a 3GPP2 file. + ThreeG2 = ftyp( + []byte("3g24"), []byte("3g25"), []byte("3g26"), []byte("3g2a"), + []byte("3g2b"), []byte("3g2c"), []byte("KDDI"), + ) + // AMp4 matches an audio MP4 file. + AMp4 = ftyp( + // audio for Adobe Flash Player 9+ + []byte("F4A "), []byte("F4B "), + // Apple iTunes AAC-LC (.M4A) Audio + []byte("M4B "), []byte("M4P "), + // MPEG-4 (.MP4) for SonyPSP + []byte("MSNV"), + // Nero Digital AAC Audio + []byte("NDAS"), + ) + // Mqv matches a Sony / Mobile QuickTime file. + Mqv = ftyp([]byte("mqt ")) + // M4a matches an audio M4A file. + M4a = ftyp([]byte("M4A ")) + // M4v matches an Appl4 M4V video file. + M4v = ftyp([]byte("M4V "), []byte("M4VH"), []byte("M4VP")) + // Heic matches a High Efficiency Image Coding (HEIC) file. + Heic = ftyp([]byte("heic"), []byte("heix")) + // HeicSequence matches a High Efficiency Image Coding (HEIC) file sequence. + HeicSequence = ftyp([]byte("hevc"), []byte("hevx")) + // Heif matches a High Efficiency Image File Format (HEIF) file. + Heif = ftyp([]byte("mif1"), []byte("heim"), []byte("heis"), []byte("avic")) + // HeifSequence matches a High Efficiency Image File Format (HEIF) file sequence. + HeifSequence = ftyp([]byte("msf1"), []byte("hevm"), []byte("hevs"), []byte("avcs")) + // Mj2 matches a Motion JPEG 2000 file: https://en.wikipedia.org/wiki/Motion_JPEG_2000. + Mj2 = ftyp([]byte("mj2s"), []byte("mjp2"), []byte("MFSM"), []byte("MGSV")) + // Dvb matches a Digital Video Broadcasting file: https://dvb.org. 
+ // https://cconcolato.github.io/mp4ra/filetype.html + // https://github.com/file/file/blob/512840337ead1076519332d24fefcaa8fac36e06/magic/Magdir/animation#L135-L154 + Dvb = ftyp( + []byte("dby1"), []byte("dsms"), []byte("dts1"), []byte("dts2"), + []byte("dts3"), []byte("dxo "), []byte("dmb1"), []byte("dmpf"), + []byte("drc1"), []byte("dv1a"), []byte("dv1b"), []byte("dv2a"), + []byte("dv2b"), []byte("dv3a"), []byte("dv3b"), []byte("dvr1"), + []byte("dvt1"), []byte("emsg")) + // TODO: add support for remaining video formats at ftyps.com. +) + +// QuickTime matches a QuickTime File Format file. +// https://www.loc.gov/preservation/digital/formats/fdd/fdd000052.shtml +// https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html#//apple_ref/doc/uid/TP40000939-CH203-38190 +// https://github.com/apache/tika/blob/0f5570691133c75ac4472c3340354a6c4080b104/tika-core/src/main/resources/org/apache/tika/mime/tika-mimetypes.xml#L7758-L7777 +func QuickTime(raw []byte, _ uint32) bool { + if len(raw) < 12 { + return false + } + // First 4 bytes represent the size of the atom as unsigned int. + // Next 4 bytes are the type of the atom. + // For `ftyp` atoms check if first byte in size is 0, otherwise, a text file + // which happens to contain 'ftypqt ' at index 4 will trigger a false positive. + if bytes.Equal(raw[4:12], []byte("ftypqt ")) || + bytes.Equal(raw[4:12], []byte("ftypmoov")) { + return raw[0] == 0x00 + } + basicAtomTypes := [][]byte{ + []byte("moov\x00"), + []byte("mdat\x00"), + []byte("free\x00"), + []byte("skip\x00"), + []byte("pnot\x00"), + } + for _, a := range basicAtomTypes { + if bytes.Equal(raw[4:9], a) { + return true + } + } + return bytes.Equal(raw[:8], []byte("\x00\x00\x00\x08wide")) +} + +// Mp4 detects an .mp4 file. Mp4 detections only does a basic ftyp check. +// Mp4 has many registered and unregistered code points so it's hard to keep track +// of all. Detection will default on video/mp4 for all ftyp files. 
+// ISO_IEC_14496-12 is the specification for the iso container. +func Mp4(raw []byte, _ uint32) bool { + if len(raw) < 12 { + return false + } + // ftyps are made out of boxes. The first 4 bytes of the box represent + // its size in big-endian uint32. First box is the ftyp box and it is small + // in size. Check most significant byte is 0 to filter out false positive + // text files that happen to contain the string "ftyp" at index 4. + if raw[0] != 0 { + return false + } + return bytes.Equal(raw[4:8], []byte("ftyp")) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go new file mode 100644 index 0000000000..cade91f18c --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go @@ -0,0 +1,55 @@ +package magic + +import ( + "bytes" + "encoding/binary" +) + +// Shp matches a shape format file. +// https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf +func Shp(raw []byte, limit uint32) bool { + if len(raw) < 112 { + return false + } + + if binary.BigEndian.Uint32(raw[0:4]) != 9994 || + binary.BigEndian.Uint32(raw[4:8]) != 0 || + binary.BigEndian.Uint32(raw[8:12]) != 0 || + binary.BigEndian.Uint32(raw[12:16]) != 0 || + binary.BigEndian.Uint32(raw[16:20]) != 0 || + binary.BigEndian.Uint32(raw[20:24]) != 0 || + binary.LittleEndian.Uint32(raw[28:32]) != 1000 { + return false + } + + shapeTypes := []int{ + 0, // Null shape + 1, // Point + 3, // Polyline + 5, // Polygon + 8, // MultiPoint + 11, // PointZ + 13, // PolylineZ + 15, // PolygonZ + 18, // MultiPointZ + 21, // PointM + 23, // PolylineM + 25, // PolygonM + 28, // MultiPointM + 31, // MultiPatch + } + + for _, st := range shapeTypes { + if st == int(binary.LittleEndian.Uint32(raw[108:112])) { + return true + } + } + + return false +} + +// Shx matches a shape index format file. 
+// https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf +func Shx(raw []byte, limit uint32) bool { + return bytes.HasPrefix(raw, []byte{0x00, 0x00, 0x27, 0x0A}) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go new file mode 100644 index 0000000000..0eb7e95f37 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go @@ -0,0 +1,110 @@ +package magic + +import "bytes" + +var ( + // Png matches a Portable Network Graphics file. + // https://www.w3.org/TR/PNG/ + Png = prefix([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}) + // Apng matches an Animated Portable Network Graphics file. + // https://wiki.mozilla.org/APNG_Specification + Apng = offset([]byte("acTL"), 37) + // Jpg matches a Joint Photographic Experts Group file. + Jpg = prefix([]byte{0xFF, 0xD8, 0xFF}) + // Jp2 matches a JPEG 2000 Image file (ISO 15444-1). + Jp2 = jpeg2k([]byte{0x6a, 0x70, 0x32, 0x20}) + // Jpx matches a JPEG 2000 Image file (ISO 15444-2). + Jpx = jpeg2k([]byte{0x6a, 0x70, 0x78, 0x20}) + // Jpm matches a JPEG 2000 Image file (ISO 15444-6). + Jpm = jpeg2k([]byte{0x6a, 0x70, 0x6D, 0x20}) + // Gif matches a Graphics Interchange Format file. + Gif = prefix([]byte("GIF87a"), []byte("GIF89a")) + // Bmp matches a bitmap image file. + Bmp = prefix([]byte{0x42, 0x4D}) + // Ps matches a PostScript file. + Ps = prefix([]byte("%!PS-Adobe-")) + // Psd matches a Photoshop Document file. + Psd = prefix([]byte("8BPS")) + // Ico matches an ICO file. + Ico = prefix([]byte{0x00, 0x00, 0x01, 0x00}, []byte{0x00, 0x00, 0x02, 0x00}) + // Icns matches an ICNS (Apple Icon Image format) file. + Icns = prefix([]byte("icns")) + // Tiff matches a Tagged Image File Format file. + Tiff = prefix([]byte{0x49, 0x49, 0x2A, 0x00}, []byte{0x4D, 0x4D, 0x00, 0x2A}) + // Bpg matches a Better Portable Graphics file. 
+ Bpg = prefix([]byte{0x42, 0x50, 0x47, 0xFB}) + // Xcf matches GIMP image data. + Xcf = prefix([]byte("gimp xcf")) + // Pat matches GIMP pattern data. + Pat = offset([]byte("GPAT"), 20) + // Gbr matches GIMP brush data. + Gbr = offset([]byte("GIMP"), 20) + // Hdr matches Radiance HDR image. + // https://web.archive.org/web/20060913152809/http://local.wasp.uwa.edu.au/~pbourke/dataformats/pic/ + Hdr = prefix([]byte("#?RADIANCE\n")) + // Xpm matches X PixMap image data. + Xpm = prefix([]byte{0x2F, 0x2A, 0x20, 0x58, 0x50, 0x4D, 0x20, 0x2A, 0x2F}) + // Jxs matches a JPEG XS coded image file (ISO/IEC 21122-3). + Jxs = prefix([]byte{0x00, 0x00, 0x00, 0x0C, 0x4A, 0x58, 0x53, 0x20, 0x0D, 0x0A, 0x87, 0x0A}) + // Jxr matches Microsoft HD JXR photo file. + Jxr = prefix([]byte{0x49, 0x49, 0xBC, 0x01}) +) + +func jpeg2k(sig []byte) Detector { + return func(raw []byte, _ uint32) bool { + if len(raw) < 24 { + return false + } + + if !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x20, 0x20}) && + !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x32, 0x20}) { + return false + } + return bytes.Equal(raw[20:24], sig) + } +} + +// Webp matches a WebP file. +func Webp(raw []byte, _ uint32) bool { + return len(raw) > 12 && + bytes.Equal(raw[0:4], []byte("RIFF")) && + bytes.Equal(raw[8:12], []byte{0x57, 0x45, 0x42, 0x50}) +} + +// Dwg matches a CAD drawing file. 
+func Dwg(raw []byte, _ uint32) bool { + if len(raw) < 6 || raw[0] != 0x41 || raw[1] != 0x43 { + return false + } + dwgVersions := [][]byte{ + {0x31, 0x2E, 0x34, 0x30}, + {0x31, 0x2E, 0x35, 0x30}, + {0x32, 0x2E, 0x31, 0x30}, + {0x31, 0x30, 0x30, 0x32}, + {0x31, 0x30, 0x30, 0x33}, + {0x31, 0x30, 0x30, 0x34}, + {0x31, 0x30, 0x30, 0x36}, + {0x31, 0x30, 0x30, 0x39}, + {0x31, 0x30, 0x31, 0x32}, + {0x31, 0x30, 0x31, 0x34}, + {0x31, 0x30, 0x31, 0x35}, + {0x31, 0x30, 0x31, 0x38}, + {0x31, 0x30, 0x32, 0x31}, + {0x31, 0x30, 0x32, 0x34}, + {0x31, 0x30, 0x33, 0x32}, + } + + for _, d := range dwgVersions { + if bytes.Equal(raw[2:6], d) { + return true + } + } + + return false +} + +// Jxl matches JPEG XL image file. +func Jxl(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0xFF, 0x0A}) || + bytes.HasPrefix(raw, []byte("\x00\x00\x00\x0cJXL\x20\x0d\x0a\x87\x0a")) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go new file mode 100644 index 0000000000..5fe435b99f --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go @@ -0,0 +1,212 @@ +// Package magic holds the matching functions used to find MIME types. +package magic + +import ( + "bytes" + "fmt" + + "github.com/gabriel-vasile/mimetype/internal/scan" +) + +type ( + // Detector receiveѕ the raw data of a file and returns whether the data + // meets any conditions. The limit parameter is an upper limit to the number + // of bytes received and is used to tell if the byte slice represents the + // whole file or is just the header of a file: len(raw) < limit or len(raw)>limit. + Detector func(raw []byte, limit uint32) bool + xmlSig struct { + // the local name of the root tag + localName []byte + // the namespace of the XML document + xmlns []byte + } +) + +// prefix creates a Detector which returns true if any of the provided signatures +// is the prefix of the raw input. 
+func prefix(sigs ...[]byte) Detector { + return func(raw []byte, limit uint32) bool { + for _, s := range sigs { + if bytes.HasPrefix(raw, s) { + return true + } + } + return false + } +} + +// offset creates a Detector which returns true if the provided signature can be +// found at offset in the raw input. +func offset(sig []byte, offset int) Detector { + return func(raw []byte, limit uint32) bool { + return len(raw) > offset && bytes.HasPrefix(raw[offset:], sig) + } +} + +// ciPrefix is like prefix but the check is case insensitive. +func ciPrefix(sigs ...[]byte) Detector { + return func(raw []byte, limit uint32) bool { + for _, s := range sigs { + if ciCheck(s, raw) { + return true + } + } + return false + } +} +func ciCheck(sig, raw []byte) bool { + if len(raw) < len(sig)+1 { + return false + } + // perform case insensitive check + for i, b := range sig { + db := raw[i] + if 'A' <= b && b <= 'Z' { + db &= 0xDF + } + if b != db { + return false + } + } + + return true +} + +// xml creates a Detector which returns true if any of the provided XML signatures +// matches the raw input. +func xml(sigs ...xmlSig) Detector { + return func(raw []byte, limit uint32) bool { + b := scan.Bytes(raw) + b.TrimLWS() + if len(b) == 0 { + return false + } + for _, s := range sigs { + if xmlCheck(s, b) { + return true + } + } + return false + } +} +func xmlCheck(sig xmlSig, raw []byte) bool { + raw = raw[:min(len(raw), 512)] + + if len(sig.localName) == 0 { + return bytes.Index(raw, sig.xmlns) > 0 + } + if len(sig.xmlns) == 0 { + return bytes.Index(raw, sig.localName) > 0 + } + + localNameIndex := bytes.Index(raw, sig.localName) + return localNameIndex != -1 && localNameIndex < bytes.Index(raw, sig.xmlns) +} + +// markup creates a Detector which returns true is any of the HTML signatures +// matches the raw input. 
+func markup(sigs ...[]byte) Detector { + return func(raw []byte, limit uint32) bool { + b := scan.Bytes(raw) + if bytes.HasPrefix(b, []byte{0xEF, 0xBB, 0xBF}) { + // We skip the UTF-8 BOM if present to ensure we correctly + // process any leading whitespace. The presence of the BOM + // is taken into account during charset detection in charset.go. + b.Advance(3) + } + b.TrimLWS() + if len(b) == 0 { + return false + } + for _, s := range sigs { + if markupCheck(s, b) { + return true + } + } + return false + } +} +func markupCheck(sig, raw []byte) bool { + if len(raw) < len(sig)+1 { + return false + } + + // perform case insensitive check + for i, b := range sig { + db := raw[i] + if 'A' <= b && b <= 'Z' { + db &= 0xDF + } + if b != db { + return false + } + } + // Next byte must be space or right angle bracket. + if db := raw[len(sig)]; !scan.ByteIsWS(db) && db != '>' { + return false + } + + return true +} + +// ftyp creates a Detector which returns true if any of the FTYP signatures +// matches the raw input. +func ftyp(sigs ...[]byte) Detector { + return func(raw []byte, limit uint32) bool { + if len(raw) < 12 { + return false + } + for _, s := range sigs { + if bytes.Equal(raw[8:12], s) { + return true + } + } + return false + } +} + +func newXMLSig(localName, xmlns string) xmlSig { + ret := xmlSig{xmlns: []byte(xmlns)} + if localName != "" { + ret.localName = []byte(fmt.Sprintf("<%s", localName)) + } + + return ret +} + +// A valid shebang starts with the "#!" characters, +// followed by any number of spaces, +// followed by the path to the interpreter, +// and, optionally, followed by the arguments for the interpreter. +// +// Ex: +// +// #! /usr/bin/env php +// +// /usr/bin/env is the interpreter, php is the first and only argument. 
+func shebang(sigs ...[]byte) Detector { + return func(raw []byte, limit uint32) bool { + b := scan.Bytes(raw) + line := b.Line() + for _, s := range sigs { + if shebangCheck(s, line) { + return true + } + } + return false + } +} + +func shebangCheck(sig []byte, raw scan.Bytes) bool { + if len(raw) < len(sig)+2 { + return false + } + if raw[0] != '#' || raw[1] != '!' { + return false + } + + raw.Advance(2) // skip #! we checked above + raw.TrimLWS() + raw.TrimRWS() + return bytes.Equal(raw, sig) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go new file mode 100644 index 0000000000..c912823e92 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go @@ -0,0 +1,211 @@ +package magic + +import ( + "bytes" + "encoding/binary" +) + +// Xlsx matches a Microsoft Excel 2007 file. +func Xlsx(raw []byte, limit uint32) bool { + return msoxml(raw, zipEntries{{ + name: []byte("xl/"), + dir: true, + }}, 100) +} + +// Docx matches a Microsoft Word 2007 file. +func Docx(raw []byte, limit uint32) bool { + return msoxml(raw, zipEntries{{ + name: []byte("word/"), + dir: true, + }}, 100) +} + +// Pptx matches a Microsoft PowerPoint 2007 file. +func Pptx(raw []byte, limit uint32) bool { + return msoxml(raw, zipEntries{{ + name: []byte("ppt/"), + dir: true, + }}, 100) +} + +// Visio matches a Microsoft Visio 2013+ file. +func Visio(raw []byte, limit uint32) bool { + return msoxml(raw, zipEntries{{ + name: []byte("visio/"), + dir: true, + }}, 100) +} + +// Ole matches an Open Linking and Embedding file. +// +// https://en.wikipedia.org/wiki/Object_Linking_and_Embedding +func Ole(raw []byte, limit uint32) bool { + return bytes.HasPrefix(raw, []byte{0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1}) +} + +// Aaf matches an Advanced Authoring Format file. 
+// See: https://pyaaf.readthedocs.io/en/latest/about.html +// See: https://en.wikipedia.org/wiki/Advanced_Authoring_Format +func Aaf(raw []byte, limit uint32) bool { + if len(raw) < 31 { + return false + } + return bytes.HasPrefix(raw[8:], []byte{0x41, 0x41, 0x46, 0x42, 0x0D, 0x00, 0x4F, 0x4D}) && + (raw[30] == 0x09 || raw[30] == 0x0C) +} + +// Doc matches a Microsoft Word 97-2003 file. +// See: https://github.com/decalage2/oletools/blob/412ee36ae45e70f42123e835871bac956d958461/oletools/common/clsid.py +func Doc(raw []byte, _ uint32) bool { + clsids := [][]byte{ + // Microsoft Word 97-2003 Document (Word.Document.8) + {0x06, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}, + // Microsoft Word 6.0-7.0 Document (Word.Document.6) + {0x00, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}, + // Microsoft Word Picture (Word.Picture.8) + {0x07, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}, + } + + for _, clsid := range clsids { + if matchOleClsid(raw, clsid) { + return true + } + } + + return false +} + +// Ppt matches a Microsoft PowerPoint 97-2003 file or a PowerPoint 95 presentation. +func Ppt(raw []byte, limit uint32) bool { + // Root CLSID test is the safest way to detect identify OLE, however, the format + // often places the root CLSID at the end of the file. 
+ if matchOleClsid(raw, []byte{ + 0x10, 0x8d, 0x81, 0x64, 0x9b, 0x4f, 0xcf, 0x11, + 0x86, 0xea, 0x00, 0xaa, 0x00, 0xb9, 0x29, 0xe8, + }) || matchOleClsid(raw, []byte{ + 0x70, 0xae, 0x7b, 0xea, 0x3b, 0xfb, 0xcd, 0x11, + 0xa9, 0x03, 0x00, 0xaa, 0x00, 0x51, 0x0e, 0xa3, + }) { + return true + } + + lin := len(raw) + if lin < 520 { + return false + } + pptSubHeaders := [][]byte{ + {0xA0, 0x46, 0x1D, 0xF0}, + {0x00, 0x6E, 0x1E, 0xF0}, + {0x0F, 0x00, 0xE8, 0x03}, + } + for _, h := range pptSubHeaders { + if bytes.HasPrefix(raw[512:], h) { + return true + } + } + + if bytes.HasPrefix(raw[512:], []byte{0xFD, 0xFF, 0xFF, 0xFF}) && + raw[518] == 0x00 && raw[519] == 0x00 { + return true + } + + return lin > 1152 && bytes.Contains(raw[1152:min(4096, lin)], + []byte("P\x00o\x00w\x00e\x00r\x00P\x00o\x00i\x00n\x00t\x00 D\x00o\x00c\x00u\x00m\x00e\x00n\x00t")) +} + +// Xls matches a Microsoft Excel 97-2003 file. +func Xls(raw []byte, limit uint32) bool { + // Root CLSID test is the safest way to detect identify OLE, however, the format + // often places the root CLSID at the end of the file. + if matchOleClsid(raw, []byte{ + 0x10, 0x08, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + }) || matchOleClsid(raw, []byte{ + 0x20, 0x08, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + }) { + return true + } + + lin := len(raw) + if lin < 520 { + return false + } + xlsSubHeaders := [][]byte{ + {0x09, 0x08, 0x10, 0x00, 0x00, 0x06, 0x05, 0x00}, + {0xFD, 0xFF, 0xFF, 0xFF, 0x10}, + {0xFD, 0xFF, 0xFF, 0xFF, 0x1F}, + {0xFD, 0xFF, 0xFF, 0xFF, 0x22}, + {0xFD, 0xFF, 0xFF, 0xFF, 0x23}, + {0xFD, 0xFF, 0xFF, 0xFF, 0x28}, + {0xFD, 0xFF, 0xFF, 0xFF, 0x29}, + } + for _, h := range xlsSubHeaders { + if bytes.HasPrefix(raw[512:], h) { + return true + } + } + + return lin > 1152 && bytes.Contains(raw[1152:min(4096, lin)], + []byte("W\x00k\x00s\x00S\x00S\x00W\x00o\x00r\x00k\x00B\x00o\x00o\x00k")) +} + +// Pub matches a Microsoft Publisher file. 
+func Pub(raw []byte, limit uint32) bool { + return matchOleClsid(raw, []byte{ + 0x01, 0x12, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, + }) +} + +// Msg matches a Microsoft Outlook email file. +func Msg(raw []byte, limit uint32) bool { + return matchOleClsid(raw, []byte{ + 0x0B, 0x0D, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, + }) +} + +// Msi matches a Microsoft Windows Installer file. +// http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File +func Msi(raw []byte, limit uint32) bool { + return matchOleClsid(raw, []byte{ + 0x84, 0x10, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, + }) +} + +// One matches a Microsoft OneNote file. +func One(raw []byte, limit uint32) bool { + return bytes.HasPrefix(raw, []byte{ + 0xe4, 0x52, 0x5c, 0x7b, 0x8c, 0xd8, 0xa7, 0x4d, + 0xae, 0xb1, 0x53, 0x78, 0xd0, 0x29, 0x96, 0xd3, + }) +} + +// Helper to match by a specific CLSID of a compound file. +// +// http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File +func matchOleClsid(in []byte, clsid []byte) bool { + // Microsoft Compound files v3 have a sector length of 512, while v4 has 4096. + // Change sector offset depending on file version. + // https://www.loc.gov/preservation/digital/formats/fdd/fdd000392.shtml + sectorLength := 512 + if len(in) < sectorLength { + return false + } + if in[26] == 0x04 && in[27] == 0x00 { + sectorLength = 4096 + } + + // SecID of first sector of the directory stream. + firstSecID := int(binary.LittleEndian.Uint32(in[48:52])) + + // Expected offset of CLSID for root storage object. 
+ clsidOffset := sectorLength*(1+firstSecID) + 80 + + if len(in) <= clsidOffset+16 { + return false + } + + return bytes.HasPrefix(in[clsidOffset:], clsid) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/netpbm.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/netpbm.go new file mode 100644 index 0000000000..4baa25767f --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/netpbm.go @@ -0,0 +1,111 @@ +package magic + +import ( + "bytes" + "strconv" + + "github.com/gabriel-vasile/mimetype/internal/scan" +) + +// NetPBM matches a Netpbm Portable BitMap ASCII/Binary file. +// +// See: https://en.wikipedia.org/wiki/Netpbm +func NetPBM(raw []byte, _ uint32) bool { + return netp(raw, "P1\n", "P4\n") +} + +// NetPGM matches a Netpbm Portable GrayMap ASCII/Binary file. +// +// See: https://en.wikipedia.org/wiki/Netpbm +func NetPGM(raw []byte, _ uint32) bool { + return netp(raw, "P2\n", "P5\n") +} + +// NetPPM matches a Netpbm Portable PixMap ASCII/Binary file. +// +// See: https://en.wikipedia.org/wiki/Netpbm +func NetPPM(raw []byte, _ uint32) bool { + return netp(raw, "P3\n", "P6\n") +} + +// NetPAM matches a Netpbm Portable Arbitrary Map file. +// +// See: https://en.wikipedia.org/wiki/Netpbm +func NetPAM(raw []byte, _ uint32) bool { + if !bytes.HasPrefix(raw, []byte("P7\n")) { + return false + } + w, h, d, m, e := false, false, false, false, false + s := scan.Bytes(raw) + var l scan.Bytes + // Read line by line. + for i := 0; i < 128; i++ { + l = s.Line() + // If the line is empty or a comment, skip. 
+ if len(l) == 0 || l.Peek() == '#' { + if len(s) == 0 { + return false + } + continue + } else if bytes.HasPrefix(l, []byte("TUPLTYPE")) { + continue + } else if bytes.HasPrefix(l, []byte("WIDTH ")) { + w = true + } else if bytes.HasPrefix(l, []byte("HEIGHT ")) { + h = true + } else if bytes.HasPrefix(l, []byte("DEPTH ")) { + d = true + } else if bytes.HasPrefix(l, []byte("MAXVAL ")) { + m = true + } else if bytes.HasPrefix(l, []byte("ENDHDR")) { + e = true + } + // When we reached header, return true if we collected all four required headers. + // WIDTH, HEIGHT, DEPTH and MAXVAL. + if e { + return w && h && d && m + } + } + return false +} + +func netp(s scan.Bytes, prefixes ...string) bool { + foundPrefix := "" + for _, p := range prefixes { + if bytes.HasPrefix(s, []byte(p)) { + foundPrefix = p + } + } + if foundPrefix == "" { + return false + } + s.Advance(len(foundPrefix)) // jump over P1, P2, P3, etc. + + var l scan.Bytes + // Read line by line. + for i := 0; i < 128; i++ { + l = s.Line() + // If the line is a comment, skip. + if l.Peek() == '#' { + continue + } + // If line has leading whitespace, then skip over whitespace. + for scan.ByteIsWS(l.Peek()) { + l.Advance(1) + } + if len(s) == 0 || len(l) > 0 { + break + } + } + + // At this point l should be the two integers denoting the size of the matrix. + width := l.PopUntil(scan.ASCIISpaces...) + for scan.ByteIsWS(l.Peek()) { + l.Advance(1) + } + height := l.PopUntil(scan.ASCIISpaces...) 
+ + w, errw := strconv.ParseInt(string(width), 10, 64) + h, errh := strconv.ParseInt(string(height), 10, 64) + return errw == nil && errh == nil && w > 0 && h > 0 +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go new file mode 100644 index 0000000000..bb4cd781b6 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go @@ -0,0 +1,42 @@ +package magic + +import ( + "bytes" +) + +/* + NOTE: + + In May 2003, two Internet RFCs were published relating to the format. + The Ogg bitstream was defined in RFC 3533 (which is classified as + 'informative') and its Internet content type (application/ogg) in RFC + 3534 (which is, as of 2006, a proposed standard protocol). In + September 2008, RFC 3534 was obsoleted by RFC 5334, which added + content types video/ogg, audio/ogg and filename extensions .ogx, .ogv, + .oga, .spx. + + See: + https://tools.ietf.org/html/rfc3533 + https://developer.mozilla.org/en-US/docs/Web/HTTP/Configuring_servers_for_Ogg_media#Serve_media_with_the_correct_MIME_type + https://github.com/file/file/blob/master/magic/Magdir/vorbis +*/ + +// Ogg matches an Ogg file. +func Ogg(raw []byte, limit uint32) bool { + return bytes.HasPrefix(raw, []byte("\x4F\x67\x67\x53\x00")) +} + +// OggAudio matches an audio ogg file. +func OggAudio(raw []byte, limit uint32) bool { + return len(raw) >= 37 && (bytes.HasPrefix(raw[28:], []byte("\x7fFLAC")) || + bytes.HasPrefix(raw[28:], []byte("\x01vorbis")) || + bytes.HasPrefix(raw[28:], []byte("OpusHead")) || + bytes.HasPrefix(raw[28:], []byte("Speex\x20\x20\x20"))) +} + +// OggVideo matches a video ogg file. 
+func OggVideo(raw []byte, limit uint32) bool { + return len(raw) >= 37 && (bytes.HasPrefix(raw[28:], []byte("\x80theora")) || + bytes.HasPrefix(raw[28:], []byte("fishead\x00")) || + bytes.HasPrefix(raw[28:], []byte("\x01video\x00\x00\x00"))) // OGM video +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go new file mode 100644 index 0000000000..1841ee871d --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go @@ -0,0 +1,411 @@ +package magic + +import ( + "bytes" + "time" + + "github.com/gabriel-vasile/mimetype/internal/charset" + "github.com/gabriel-vasile/mimetype/internal/json" + mkup "github.com/gabriel-vasile/mimetype/internal/markup" + "github.com/gabriel-vasile/mimetype/internal/scan" +) + +var ( + // HTML matches a Hypertext Markup Language file. + HTML = markup( + []byte(" 0 +} + +// NdJSON matches a Newline delimited JSON file. All complete lines from raw +// must be valid JSON documents meaning they contain one of the valid JSON data +// types. +func NdJSON(raw []byte, limit uint32) bool { + lCount, objOrArr := 0, 0 + + s := scan.Bytes(raw) + s.DropLastLine(limit) + var l scan.Bytes + for len(s) != 0 { + l = s.Line() + _, inspected, firstToken, _ := json.Parse(json.QueryNone, l) + if len(l) != inspected { + return false + } + if firstToken == json.TokArray || firstToken == json.TokObject { + objOrArr++ + } + lCount++ + } + + return lCount > 1 && objOrArr > 0 +} + +// Svg matches a SVG file. +func Svg(raw []byte, limit uint32) bool { + return svgWithoutXMLDeclaration(raw) || svgWithXMLDeclaration(raw) +} + +// svgWithoutXMLDeclaration matches a SVG image that does not have an XML header. 
+// Example: +// +// +// +// +// +func svgWithoutXMLDeclaration(s scan.Bytes) bool { + for scan.ByteIsWS(s.Peek()) { + s.Advance(1) + } + for mkup.SkipAComment(&s) { + } + if !bytes.HasPrefix(s, []byte(" +// +// +// +func svgWithXMLDeclaration(s scan.Bytes) bool { + for scan.ByteIsWS(s.Peek()) { + s.Advance(1) + } + if !bytes.HasPrefix(s, []byte(" 4096 { + s = s[:4096] + } + return hasVersion && bytes.Contains(s, []byte(" 00:02:19,376) limits second line + // length to exactly 29 characters. + if len(line) != 29 { + return false + } + // Decimal separator of fractional seconds in the timestamps must be a + // comma, not a period. + if bytes.IndexByte(line, '.') != -1 { + return false + } + sep := []byte(" --> ") + i := bytes.Index(line, sep) + if i == -1 { + return false + } + const layout = "15:04:05,000" + t0, err := time.Parse(layout, string(line[:i])) + if err != nil { + return false + } + t1, err := time.Parse(layout, string(line[i+len(sep):])) + if err != nil { + return false + } + if t0.After(t1) { + return false + } + + line = s.Line() + // A third line must exist and not be empty. This is the actual subtitle text. + return len(line) != 0 +} + +// Vtt matches a Web Video Text Tracks (WebVTT) file. See +// https://www.iana.org/assignments/media-types/text/vtt. +func Vtt(raw []byte, limit uint32) bool { + // Prefix match. 
+ prefixes := [][]byte{ + {0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0A}, // UTF-8 BOM, "WEBVTT" and a line feed + {0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0D}, // UTF-8 BOM, "WEBVTT" and a carriage return + {0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x20}, // UTF-8 BOM, "WEBVTT" and a space + {0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x09}, // UTF-8 BOM, "WEBVTT" and a horizontal tab + {0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0A}, // "WEBVTT" and a line feed + {0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0D}, // "WEBVTT" and a carriage return + {0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x20}, // "WEBVTT" and a space + {0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x09}, // "WEBVTT" and a horizontal tab + } + for _, p := range prefixes { + if bytes.HasPrefix(raw, p) { + return true + } + } + + // Exact match. + return bytes.Equal(raw, []byte{0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54}) || // UTF-8 BOM and "WEBVTT" + bytes.Equal(raw, []byte{0x57, 0x45, 0x42, 0x56, 0x54, 0x54}) // "WEBVTT" +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go new file mode 100644 index 0000000000..020b5ee75b --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go @@ -0,0 +1,43 @@ +package magic + +import ( + "github.com/gabriel-vasile/mimetype/internal/csv" + "github.com/gabriel-vasile/mimetype/internal/scan" +) + +// CSV matches a comma-separated values file. +func CSV(raw []byte, limit uint32) bool { + return sv(raw, ',', limit) +} + +// TSV matches a tab-separated values file. 
+func TSV(raw []byte, limit uint32) bool { + return sv(raw, '\t', limit) +} + +func sv(in []byte, comma byte, limit uint32) bool { + s := scan.Bytes(in) + s.DropLastLine(limit) + r := csv.NewParser(comma, '#', s) + + headerFields, _, hasMore := r.CountFields(false) + if headerFields < 2 || !hasMore { + return false + } + csvLines := 1 // 1 for header + for { + fields, _, hasMore := r.CountFields(false) + if !hasMore && fields == 0 { + break + } + csvLines++ + if fields != headerFields { + return false + } + if csvLines >= 10 { + return true + } + } + + return csvLines >= 2 +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go new file mode 100644 index 0000000000..9caf55538a --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go @@ -0,0 +1,85 @@ +package magic + +import ( + "bytes" +) + +var ( + // Flv matches a Flash video file. + Flv = prefix([]byte("\x46\x4C\x56\x01")) + // Asf matches an Advanced Systems Format file. + Asf = prefix([]byte{ + 0x30, 0x26, 0xB2, 0x75, 0x8E, 0x66, 0xCF, 0x11, + 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C, + }) + // Rmvb matches a RealMedia Variable Bitrate file. + Rmvb = prefix([]byte{0x2E, 0x52, 0x4D, 0x46}) +) + +// WebM matches a WebM file. +func WebM(raw []byte, limit uint32) bool { + return isMatroskaFileTypeMatched(raw, "webm") +} + +// Mkv matches a mkv file. +func Mkv(raw []byte, limit uint32) bool { + return isMatroskaFileTypeMatched(raw, "matroska") +} + +// isMatroskaFileTypeMatched is used for webm and mkv file matching. +// It checks for .Eߣ sequence. If the sequence is found, +// then it means it is Matroska media container, including WebM. +// Then it verifies which of the file type it is representing by matching the +// file specific string. 
+func isMatroskaFileTypeMatched(in []byte, flType string) bool { + if bytes.HasPrefix(in, []byte("\x1A\x45\xDF\xA3")) { + return isFileTypeNamePresent(in, flType) + } + return false +} + +// isFileTypeNamePresent accepts the matroska input data stream and searches +// for the given file type in the stream. Return whether a match is found. +// The logic of search is: find first instance of \x42\x82 and then +// search for given string after n bytes of above instance. +func isFileTypeNamePresent(in []byte, flType string) bool { + ind, maxInd, lenIn := 0, 4096, len(in) + if lenIn < maxInd { // restricting length to 4096 + maxInd = lenIn + } + ind = bytes.Index(in[:maxInd], []byte("\x42\x82")) + if ind > 0 && lenIn > ind+2 { + ind += 2 + + // filetype name will be present exactly + // n bytes after the match of the two bytes "\x42\x82" + n := vintWidth(int(in[ind])) + if lenIn > ind+n { + return bytes.HasPrefix(in[ind+n:], []byte(flType)) + } + } + return false +} + +// vintWidth parses the variable-integer width in matroska containers +func vintWidth(v int) int { + mask, max, num := 128, 8, 1 + for num < max && v&mask == 0 { + mask = mask >> 1 + num++ + } + return num +} + +// Mpeg matches a Moving Picture Experts Group file. +func Mpeg(raw []byte, limit uint32) bool { + return len(raw) > 3 && bytes.HasPrefix(raw, []byte{0x00, 0x00, 0x01}) && + raw[3] >= 0xB0 && raw[3] <= 0xBF +} + +// Avi matches an Audio Video Interleaved file. 
+func Avi(raw []byte, limit uint32) bool { + return len(raw) > 16 && + bytes.Equal(raw[:4], []byte("RIFF")) && + bytes.Equal(raw[8:16], []byte("AVI LIST")) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go new file mode 100644 index 0000000000..17750e6e6f --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go @@ -0,0 +1,189 @@ +package magic + +import ( + "bytes" + + "github.com/gabriel-vasile/mimetype/internal/scan" +) + +var ( + // Odt matches an OpenDocument Text file. + Odt = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.text"), 30) + // Ott matches an OpenDocument Text Template file. + Ott = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.text-template"), 30) + // Ods matches an OpenDocument Spreadsheet file. + Ods = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet"), 30) + // Ots matches an OpenDocument Spreadsheet Template file. + Ots = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet-template"), 30) + // Odp matches an OpenDocument Presentation file. + Odp = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.presentation"), 30) + // Otp matches an OpenDocument Presentation Template file. + Otp = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.presentation-template"), 30) + // Odg matches an OpenDocument Drawing file. + Odg = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.graphics"), 30) + // Otg matches an OpenDocument Drawing Template file. + Otg = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.graphics-template"), 30) + // Odf matches an OpenDocument Formula file. + Odf = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.formula"), 30) + // Odc matches an OpenDocument Chart file. + Odc = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.chart"), 30) + // Epub matches an EPUB file. 
+ Epub = offset([]byte("mimetypeapplication/epub+zip"), 30) + // Sxc matches an OpenOffice Spreadsheet file. + Sxc = offset([]byte("mimetypeapplication/vnd.sun.xml.calc"), 30) +) + +// Zip matches a zip archive. +func Zip(raw []byte, limit uint32) bool { + return len(raw) > 3 && + raw[0] == 0x50 && raw[1] == 0x4B && + (raw[2] == 0x3 || raw[2] == 0x5 || raw[2] == 0x7) && + (raw[3] == 0x4 || raw[3] == 0x6 || raw[3] == 0x8) +} + +// Jar matches a Java archive file. There are two types of Jar files: +// 1. the ones that can be opened with jexec and have 0xCAFE optional flag +// https://stackoverflow.com/tags/executable-jar/info +// 2. regular jars, same as above, just without the executable flag +// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=262278#c0 +// There is an argument to only check for manifest, since it's the common nominator +// for both executable and non-executable versions. But the traversing zip entries +// is unreliable because it does linear search for signatures +// (instead of relying on offsets told by the file.) +func Jar(raw []byte, limit uint32) bool { + return executableJar(raw) || + zipHas(raw, zipEntries{{ + name: []byte("META-INF/MANIFEST.MF"), + }, { + name: []byte("META-INF/"), + }}, 1) +} + +// KMZ matches a zipped KML file, which is "doc.kml" by convention. +func KMZ(raw []byte, _ uint32) bool { + return zipHas(raw, zipEntries{{ + name: []byte("doc.kml"), + }}, 100) +} + +// An executable Jar has a 0xCAFE flag enabled in the first zip entry. +// The rule from file/file is: +// >(26.s+30) leshort 0xcafe Java archive data (JAR) +func executableJar(b scan.Bytes) bool { + b.Advance(0x1A) + offset, ok := b.Uint16() + if !ok { + return false + } + b.Advance(int(offset) + 2) + + cafe, ok := b.Uint16() + return ok && cafe == 0xCAFE +} + +// zipIterator iterates over a zip file returning the name of the zip entries +// in that file. 
+type zipIterator struct { + b scan.Bytes +} + +type zipEntries []struct { + name []byte + dir bool // dir means checking just the prefix of the entry, not the whole path +} + +func (z zipEntries) match(file []byte) bool { + for i := range z { + if z[i].dir && bytes.HasPrefix(file, z[i].name) { + return true + } + if bytes.Equal(file, z[i].name) { + return true + } + } + return false +} + +func zipHas(raw scan.Bytes, searchFor zipEntries, stopAfter int) bool { + iter := zipIterator{raw} + for i := 0; i < stopAfter; i++ { + f := iter.next() + if len(f) == 0 { + break + } + if searchFor.match(f) { + return true + } + } + + return false +} + +// msoxml behaves like zipHas, but it puts restrictions on what the first zip +// entry can be. +func msoxml(raw scan.Bytes, searchFor zipEntries, stopAfter int) bool { + iter := zipIterator{raw} + for i := 0; i < stopAfter; i++ { + f := iter.next() + if len(f) == 0 { + break + } + if searchFor.match(f) { + return true + } + // If the first is not one of the next usually expected entries, + // then abort this check. + if i == 0 { + if !bytes.Equal(f, []byte("[Content_Types].xml")) && + !bytes.Equal(f, []byte("_rels/.rels")) && + !bytes.Equal(f, []byte("docProps")) && + !bytes.Equal(f, []byte("customXml")) && + !bytes.Equal(f, []byte("[trash]")) { + return false + } + } + } + + return false +} + +// next extracts the name of the next zip entry. +func (i *zipIterator) next() []byte { + pk := []byte("PK\003\004") + + n := bytes.Index(i.b, pk) + if n == -1 { + return nil + } + i.b.Advance(n) + if !i.b.Advance(0x1A) { + return nil + } + l, ok := i.b.Uint16() + if !ok { + return nil + } + if !i.b.Advance(0x02) { + return nil + } + if len(i.b) < int(l) { + return nil + } + return i.b[:l] +} + +// APK matches an Android Package Archive. 
+// The source of signatures is https://github.com/file/file/blob/1778642b8ba3d947a779a36fcd81f8e807220a19/magic/Magdir/archive#L1820-L1887 +func APK(raw []byte, _ uint32) bool { + return zipHas(raw, zipEntries{{ + name: []byte("AndroidManifest.xml"), + }, { + name: []byte("META-INF/com/android/build/gradle/app-metadata.properties"), + }, { + name: []byte("classes.dex"), + }, { + name: []byte("resources.arsc"), + }, { + name: []byte("res/drawable"), + }}, 100) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/markup/markup.go b/vendor/github.com/gabriel-vasile/mimetype/internal/markup/markup.go new file mode 100644 index 0000000000..937fa1da59 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/markup/markup.go @@ -0,0 +1,103 @@ +// Package markup implements functions for extracting info from +// HTML and XML documents. +package markup + +import ( + "bytes" + + "github.com/gabriel-vasile/mimetype/internal/scan" +) + +func GetAnAttribute(s *scan.Bytes) (name, val string, hasMore bool) { + for scan.ByteIsWS(s.Peek()) || s.Peek() == '/' { + s.Advance(1) + } + if s.Peek() == '>' { + return "", "", false + } + // Allocate 10 to avoid resizes. + // Attribute names and values are continuous slices of bytes in input, + // so we could do without allocating and returning slices of input. + nameB := make([]byte, 0, 10) + // step 4 and 5 + for { + // bap means byte at position in the specification. 
+ bap := s.Pop() + if bap == 0 { + return "", "", false + } + if bap == '=' && len(nameB) > 0 { + val, hasMore := getAValue(s) + return string(nameB), string(val), hasMore + } else if scan.ByteIsWS(bap) { + for scan.ByteIsWS(s.Peek()) { + s.Advance(1) + } + if s.Peek() != '=' { + return string(nameB), "", true + } + s.Advance(1) + for scan.ByteIsWS(s.Peek()) { + s.Advance(1) + } + val, hasMore := getAValue(s) + return string(nameB), string(val), hasMore + } else if bap == '/' || bap == '>' { + return string(nameB), "", false + } else if bap >= 'A' && bap <= 'Z' { + nameB = append(nameB, bap+0x20) + } else { + nameB = append(nameB, bap) + } + } +} + +func getAValue(s *scan.Bytes) (_ []byte, hasMore bool) { + for scan.ByteIsWS(s.Peek()) { + s.Advance(1) + } + origS, end := *s, 0 + bap := s.Pop() + if bap == 0 { + return nil, false + } + end++ + // Step 10 + switch bap { + case '"', '\'': + val := s.PopUntil(bap) + if s.Pop() != bap { + return nil, false + } + return val, s.Peek() != 0 && s.Peek() != '>' + case '>': + return nil, false + } + + // Step 11 + for { + bap = s.Pop() + if bap == 0 { + return nil, false + } + switch { + case scan.ByteIsWS(bap): + return origS[:end], true + case bap == '>': + return origS[:end], false + default: + end++ + } + } +} + +func SkipAComment(s *scan.Bytes) (skipped bool) { + if bytes.HasPrefix(*s, []byte("")); i != -1 { + s.Advance(i + 2 + 3) // 2 comes from len(). + return true + } + } + return false +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/scan/bytes.go b/vendor/github.com/gabriel-vasile/mimetype/internal/scan/bytes.go new file mode 100644 index 0000000000..9f09f0781c --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/scan/bytes.go @@ -0,0 +1,213 @@ +// Package scan has functions for scanning byte slices. +package scan + +import ( + "bytes" + "encoding/binary" +) + +// Bytes is a byte slice with helper methods for easier scanning. 
+type Bytes []byte + +func (b *Bytes) Advance(n int) bool { + if n < 0 || len(*b) < n { + return false + } + *b = (*b)[n:] + return true +} + +// TrimLWS trims whitespace from beginning of the bytes. +func (b *Bytes) TrimLWS() { + firstNonWS := 0 + for ; firstNonWS < len(*b) && ByteIsWS((*b)[firstNonWS]); firstNonWS++ { + } + + *b = (*b)[firstNonWS:] +} + +// TrimRWS trims whitespace from the end of the bytes. +func (b *Bytes) TrimRWS() { + lb := len(*b) + for lb > 0 && ByteIsWS((*b)[lb-1]) { + *b = (*b)[:lb-1] + lb-- + } +} + +// Peek one byte from b or 0x00 if b is empty. +func (b *Bytes) Peek() byte { + if len(*b) > 0 { + return (*b)[0] + } + return 0 +} + +// Pop one byte from b or 0x00 if b is empty. +func (b *Bytes) Pop() byte { + if len(*b) > 0 { + ret := (*b)[0] + *b = (*b)[1:] + return ret + } + return 0 +} + +// PopN pops n bytes from b or nil if b is empty. +func (b *Bytes) PopN(n int) []byte { + if len(*b) >= n { + ret := (*b)[:n] + *b = (*b)[n:] + return ret + } + return nil +} + +// PopUntil will advance b until, but not including, the first occurence of stopAt +// character. If no occurence is found, then it will advance until the end of b. +// The returned Bytes is a slice of all the bytes that we're advanced over. +func (b *Bytes) PopUntil(stopAt ...byte) Bytes { + if len(*b) == 0 { + return Bytes{} + } + i := bytes.IndexAny(*b, string(stopAt)) + if i == -1 { + i = len(*b) + } + + prefix := (*b)[:i] + *b = (*b)[i:] + return Bytes(prefix) +} + +// ReadSlice is the same as PopUntil, but the returned value includes stopAt as well. +func (b *Bytes) ReadSlice(stopAt byte) Bytes { + if len(*b) == 0 { + return Bytes{} + } + i := bytes.IndexByte(*b, stopAt) + if i == -1 { + i = len(*b) + } else { + i++ + } + + prefix := (*b)[:i] + *b = (*b)[i:] + return Bytes(prefix) +} + +// Line returns the first line from b and advances b with the length of the +// line. One new line character is trimmed after the line if it exists. 
+func (b *Bytes) Line() Bytes { + line := b.PopUntil('\n') + lline := len(line) + if lline > 0 && line[lline-1] == '\r' { + line = line[:lline-1] + } + b.Advance(1) + return line +} + +// DropLastLine drops the last incomplete line from b. +// +// mimetype limits itself to ReadLimit bytes when performing a detection. +// This means, for file formats like CSV for NDJSON, the last line of the input +// can be an incomplete line. +// If b length is less than readLimit, it means we received an incomplete file +// and proceed with dropping the last line. +func (b *Bytes) DropLastLine(readLimit uint32) { + if readLimit == 0 || uint32(len(*b)) < readLimit { + return + } + + for i := len(*b) - 1; i > 0; i-- { + if (*b)[i] == '\n' { + *b = (*b)[:i] + return + } + } +} + +func (b *Bytes) Uint16() (uint16, bool) { + if len(*b) < 2 { + return 0, false + } + v := binary.LittleEndian.Uint16(*b) + *b = (*b)[2:] + return v, true +} + +const ( + CompactWS = 1 << iota + IgnoreCase +) + +// Search for occurences of pattern p inside b at any index. +func (b Bytes) Search(p []byte, flags int) int { + if flags == 0 { + return bytes.Index(b, p) + } + + lb, lp := len(b), len(p) + for i := range b { + if lb-i < lp { + return -1 + } + if b[i:].Match(p, flags) { + return i + } + } + + return 0 +} + +// Match pattern p at index 0 of b. +func (b Bytes) Match(p []byte, flags int) bool { + for len(b) > 0 { + // If we finished all we we're looking for from p. 
+ if len(p) == 0 { + return true + } + if flags&IgnoreCase > 0 && isUpper(p[0]) { + if upper(b[0]) != p[0] { + return false + } + b, p = b[1:], p[1:] + } else if flags&CompactWS > 0 && ByteIsWS(p[0]) { + p = p[1:] + if !ByteIsWS(b[0]) { + return false + } + b = b[1:] + if !ByteIsWS(p[0]) { + b.TrimLWS() + } + } else { + if b[0] != p[0] { + return false + } + b, p = b[1:], p[1:] + } + } + return true +} + +func isUpper(c byte) bool { + return c >= 'A' && c <= 'Z' +} +func upper(c byte) byte { + if c >= 'a' && c <= 'z' { + return c - ('a' - 'A') + } + return c +} + +func ByteIsWS(b byte) bool { + return b == '\t' || b == '\n' || b == '\x0c' || b == '\r' || b == ' ' +} + +var ( + ASCIISpaces = []byte{' ', '\r', '\n', '\x0c', '\t'} + ASCIIDigits = []byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} +) diff --git a/vendor/github.com/gabriel-vasile/mimetype/mime.go b/vendor/github.com/gabriel-vasile/mimetype/mime.go new file mode 100644 index 0000000000..b82627a8b8 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/mime.go @@ -0,0 +1,188 @@ +package mimetype + +import ( + "mime" + + "github.com/gabriel-vasile/mimetype/internal/charset" + "github.com/gabriel-vasile/mimetype/internal/magic" +) + +// MIME struct holds information about a file format: the string representation +// of the MIME type, the extension and the parent file format. +type MIME struct { + mime string + aliases []string + extension string + // detector receives the raw input and a limit for the number of bytes it is + // allowed to check. It returns whether the input matches a signature or not. + detector magic.Detector + children []*MIME + parent *MIME +} + +// String returns the string representation of the MIME type, e.g., "application/zip". +func (m *MIME) String() string { + return m.mime +} + +// Extension returns the file extension associated with the MIME type. +// It includes the leading dot, as in ".html". 
When the file format does not +// have an extension, the empty string is returned. +func (m *MIME) Extension() string { + return m.extension +} + +// Parent returns the parent MIME type from the hierarchy. +// Each MIME type has a non-nil parent, except for the root MIME type. +// +// For example, the application/json and text/html MIME types have text/plain as +// their parent because they are text files who happen to contain JSON or HTML. +// Another example is the ZIP format, which is used as container +// for Microsoft Office files, EPUB files, JAR files, and others. +func (m *MIME) Parent() *MIME { + return m.parent +} + +// Is checks whether this MIME type, or any of its aliases, is equal to the +// expected MIME type. MIME type equality test is done on the "type/subtype" +// section, ignores any optional MIME parameters, ignores any leading and +// trailing whitespace, and is case insensitive. +func (m *MIME) Is(expectedMIME string) bool { + // Parsing is needed because some detected MIME types contain parameters + // that need to be stripped for the comparison. + expectedMIME, _, _ = mime.ParseMediaType(expectedMIME) + found, _, _ := mime.ParseMediaType(m.mime) + + if expectedMIME == found { + return true + } + + for _, alias := range m.aliases { + if alias == expectedMIME { + return true + } + } + + return false +} + +func newMIME( + mime, extension string, + detector magic.Detector, + children ...*MIME) *MIME { + m := &MIME{ + mime: mime, + extension: extension, + detector: detector, + children: children, + } + + for _, c := range children { + c.parent = m + } + + return m +} + +func (m *MIME) alias(aliases ...string) *MIME { + m.aliases = aliases + return m +} + +// match does a depth-first search on the signature tree. It returns the deepest +// successful node for which all the children detection functions fail. 
+func (m *MIME) match(in []byte, readLimit uint32) *MIME { + for _, c := range m.children { + if c.detector(in, readLimit) { + return c.match(in, readLimit) + } + } + + needsCharset := map[string]func([]byte) string{ + "text/plain": charset.FromPlain, + "text/html": charset.FromHTML, + "text/xml": charset.FromXML, + } + charset := "" + if f, ok := needsCharset[m.mime]; ok { + // The charset comes from BOM, from HTML headers, from XML headers. + // Limit the number of bytes searched for to 1024. + charset = f(in[:min(len(in), 1024)]) + } + if m == root { + return m + } + + return m.cloneHierarchy(charset) +} + +// flatten transforms an hierarchy of MIMEs into a slice of MIMEs. +func (m *MIME) flatten() []*MIME { + out := []*MIME{m} + for _, c := range m.children { + out = append(out, c.flatten()...) + } + + return out +} + +// clone creates a new MIME with the provided optional MIME parameters. +func (m *MIME) clone(charset string) *MIME { + clonedMIME := m.mime + if charset != "" { + clonedMIME = m.mime + "; charset=" + charset + } + + return &MIME{ + mime: clonedMIME, + aliases: m.aliases, + extension: m.extension, + } +} + +// cloneHierarchy creates a clone of m and all its ancestors. The optional MIME +// parameters are set on the last child of the hierarchy. +func (m *MIME) cloneHierarchy(charset string) *MIME { + ret := m.clone(charset) + lastChild := ret + for p := m.Parent(); p != nil; p = p.Parent() { + pClone := p.clone("") + lastChild.parent = pClone + lastChild = pClone + } + + return ret +} + +func (m *MIME) lookup(mime string) *MIME { + for _, n := range append(m.aliases, m.mime) { + if n == mime { + return m + } + } + + for _, c := range m.children { + if m := c.lookup(mime); m != nil { + return m + } + } + return nil +} + +// Extend adds detection for a sub-format. The detector is a function +// returning true when the raw input file satisfies a signature. +// The sub-format will be detected if all the detectors in the parent chain return true. 
+// The extension should include the leading dot, as in ".html". +func (m *MIME) Extend(detector func(raw []byte, limit uint32) bool, mime, extension string, aliases ...string) { + c := &MIME{ + mime: mime, + extension: extension, + detector: detector, + parent: m, + aliases: aliases, + } + + mu.Lock() + m.children = append([]*MIME{c}, m.children...) + mu.Unlock() +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/mimetype.go b/vendor/github.com/gabriel-vasile/mimetype/mimetype.go new file mode 100644 index 0000000000..d8d512b806 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/mimetype.go @@ -0,0 +1,126 @@ +// Package mimetype uses magic number signatures to detect the MIME type of a file. +// +// File formats are stored in a hierarchy with application/octet-stream at its root. +// For example, the hierarchy for HTML format is application/octet-stream -> +// text/plain -> text/html. +package mimetype + +import ( + "io" + "mime" + "os" + "sync/atomic" +) + +var defaultLimit uint32 = 3072 + +// readLimit is the maximum number of bytes from the input used when detecting. +var readLimit uint32 = defaultLimit + +// Detect returns the MIME type found from the provided byte slice. +// +// The result is always a valid MIME type, with application/octet-stream +// returned when identification failed. +func Detect(in []byte) *MIME { + // Using atomic because readLimit can be written at the same time in other goroutine. + l := atomic.LoadUint32(&readLimit) + if l > 0 && len(in) > int(l) { + in = in[:l] + } + mu.RLock() + defer mu.RUnlock() + return root.match(in, l) +} + +// DetectReader returns the MIME type of the provided reader. +// +// The result is always a valid MIME type, with application/octet-stream +// returned when identification failed with or without an error. +// Any error returned is related to the reading from the input reader. +// +// DetectReader assumes the reader offset is at the start. 
If the input is an +// io.ReadSeeker you previously read from, it should be rewinded before detection: +// +// reader.Seek(0, io.SeekStart) +func DetectReader(r io.Reader) (*MIME, error) { + var in []byte + var err error + + // Using atomic because readLimit can be written at the same time in other goroutine. + l := atomic.LoadUint32(&readLimit) + if l == 0 { + in, err = io.ReadAll(r) + if err != nil { + return errMIME, err + } + } else { + var n int + in = make([]byte, l) + // io.UnexpectedEOF means len(r) < len(in). It is not an error in this case, + // it just means the input file is smaller than the allocated bytes slice. + n, err = io.ReadFull(r, in) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return errMIME, err + } + in = in[:n] + } + + mu.RLock() + defer mu.RUnlock() + return root.match(in, l), nil +} + +// DetectFile returns the MIME type of the provided file. +// +// The result is always a valid MIME type, with application/octet-stream +// returned when identification failed with or without an error. +// Any error returned is related to the opening and reading from the input file. +func DetectFile(path string) (*MIME, error) { + f, err := os.Open(path) + if err != nil { + return errMIME, err + } + defer f.Close() + + return DetectReader(f) +} + +// EqualsAny reports whether s MIME type is equal to any MIME type in mimes. +// MIME type equality test is done on the "type/subtype" section, ignores +// any optional MIME parameters, ignores any leading and trailing whitespace, +// and is case insensitive. +func EqualsAny(s string, mimes ...string) bool { + s, _, _ = mime.ParseMediaType(s) + for _, m := range mimes { + m, _, _ = mime.ParseMediaType(m) + if s == m { + return true + } + } + + return false +} + +// SetLimit sets the maximum number of bytes read from input when detecting the MIME type. 
+// Increasing the limit provides better detection for file formats which store +// their magical numbers towards the end of the file: docx, pptx, xlsx, etc. +// During detection data is read in a single block of size limit, i.e. it is not buffered. +// A limit of 0 means the whole input file will be used. +func SetLimit(limit uint32) { + // Using atomic because readLimit can be read at the same time in other goroutine. + atomic.StoreUint32(&readLimit, limit) +} + +// Extend adds detection for other file formats. +// It is equivalent to calling Extend() on the root mime type "application/octet-stream". +func Extend(detector func(raw []byte, limit uint32) bool, mime, extension string, aliases ...string) { + root.Extend(detector, mime, extension, aliases...) +} + +// Lookup finds a MIME object by its string representation. +// The representation can be the main mime type, or any of its aliases. +func Lookup(mime string) *MIME { + mu.RLock() + defer mu.RUnlock() + return root.lookup(mime) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md new file mode 100644 index 0000000000..3186a8bf0b --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md @@ -0,0 +1,196 @@ +## 191 Supported MIME types +This file is automatically generated when running tests. Do not edit manually. 
+ +Extension | MIME type | Aliases +--------- | --------- | ------- +**n/a** | application/octet-stream | - +**.xpm** | image/x-xpixmap | - +**.7z** | application/x-7z-compressed | - +**.zip** | application/zip | application/x-zip, application/x-zip-compressed +**.docx** | application/vnd.openxmlformats-officedocument.wordprocessingml.document | - +**.pptx** | application/vnd.openxmlformats-officedocument.presentationml.presentation | - +**.xlsx** | application/vnd.openxmlformats-officedocument.spreadsheetml.sheet | - +**.epub** | application/epub+zip | - +**.apk** | application/vnd.android.package-archive | - +**.jar** | application/java-archive | application/jar, application/jar-archive, application/x-java-archive +**.odt** | application/vnd.oasis.opendocument.text | application/x-vnd.oasis.opendocument.text +**.ott** | application/vnd.oasis.opendocument.text-template | application/x-vnd.oasis.opendocument.text-template +**.ods** | application/vnd.oasis.opendocument.spreadsheet | application/x-vnd.oasis.opendocument.spreadsheet +**.ots** | application/vnd.oasis.opendocument.spreadsheet-template | application/x-vnd.oasis.opendocument.spreadsheet-template +**.odp** | application/vnd.oasis.opendocument.presentation | application/x-vnd.oasis.opendocument.presentation +**.otp** | application/vnd.oasis.opendocument.presentation-template | application/x-vnd.oasis.opendocument.presentation-template +**.odg** | application/vnd.oasis.opendocument.graphics | application/x-vnd.oasis.opendocument.graphics +**.otg** | application/vnd.oasis.opendocument.graphics-template | application/x-vnd.oasis.opendocument.graphics-template +**.odf** | application/vnd.oasis.opendocument.formula | application/x-vnd.oasis.opendocument.formula +**.odc** | application/vnd.oasis.opendocument.chart | application/x-vnd.oasis.opendocument.chart +**.sxc** | application/vnd.sun.xml.calc | - +**.kmz** | application/vnd.google-earth.kmz | - +**.vsdx** | application/vnd.ms-visio.drawing.main+xml | - 
+**.pdf** | application/pdf | application/x-pdf +**.fdf** | application/vnd.fdf | - +**n/a** | application/x-ole-storage | - +**.msi** | application/x-ms-installer | application/x-windows-installer, application/x-msi +**.aaf** | application/octet-stream | - +**.msg** | application/vnd.ms-outlook | - +**.xls** | application/vnd.ms-excel | application/msexcel +**.pub** | application/vnd.ms-publisher | - +**.ppt** | application/vnd.ms-powerpoint | application/mspowerpoint +**.doc** | application/msword | application/vnd.ms-word +**.ps** | application/postscript | - +**.psd** | image/vnd.adobe.photoshop | image/x-psd, application/photoshop +**.p7s** | application/pkcs7-signature | - +**.ogg** | application/ogg | application/x-ogg +**.oga** | audio/ogg | - +**.ogv** | video/ogg | - +**.png** | image/png | - +**.png** | image/vnd.mozilla.apng | - +**.jpg** | image/jpeg | - +**.jxl** | image/jxl | - +**.jp2** | image/jp2 | - +**.jpf** | image/jpx | - +**.jpm** | image/jpm | video/jpm +**.jxs** | image/jxs | - +**.gif** | image/gif | - +**.webp** | image/webp | - +**.exe** | application/vnd.microsoft.portable-executable | - +**n/a** | application/x-elf | - +**n/a** | application/x-object | - +**n/a** | application/x-executable | - +**.so** | application/x-sharedlib | - +**n/a** | application/x-coredump | - +**.a** | application/x-archive | application/x-unix-archive +**.deb** | application/vnd.debian.binary-package | - +**.tar** | application/x-tar | - +**.xar** | application/x-xar | - +**.bz2** | application/x-bzip2 | - +**.fits** | application/fits | image/fits +**.tiff** | image/tiff | - +**.bmp** | image/bmp | image/x-bmp, image/x-ms-bmp +**.123** | application/vnd.lotus-1-2-3 | - +**.ico** | image/x-icon | - +**.mp3** | audio/mpeg | audio/x-mpeg, audio/mp3 +**.flac** | audio/flac | - +**.midi** | audio/midi | audio/mid, audio/sp-midi, audio/x-mid, audio/x-midi +**.ape** | audio/ape | - +**.mpc** | audio/musepack | - +**.amr** | audio/amr | audio/amr-nb +**.wav** | 
audio/wav | audio/x-wav, audio/vnd.wave, audio/wave +**.aiff** | audio/aiff | audio/x-aiff +**.au** | audio/basic | - +**.mpeg** | video/mpeg | - +**.mov** | video/quicktime | - +**.mp4** | video/mp4 | - +**.avif** | image/avif | - +**.3gp** | video/3gpp | video/3gp, audio/3gpp +**.3g2** | video/3gpp2 | video/3g2, audio/3gpp2 +**.mp4** | audio/mp4 | audio/x-mp4a +**.mqv** | video/quicktime | - +**.m4a** | audio/x-m4a | - +**.m4v** | video/x-m4v | - +**.heic** | image/heic | - +**.heic** | image/heic-sequence | - +**.heif** | image/heif | - +**.heif** | image/heif-sequence | - +**.mj2** | video/mj2 | - +**.dvb** | video/vnd.dvb.file | - +**.webm** | video/webm | audio/webm +**.avi** | video/x-msvideo | video/avi, video/msvideo +**.flv** | video/x-flv | - +**.mkv** | video/x-matroska | - +**.asf** | video/x-ms-asf | video/asf, video/x-ms-wmv +**.aac** | audio/aac | - +**.voc** | audio/x-unknown | - +**.m3u** | application/vnd.apple.mpegurl | audio/mpegurl +**.rmvb** | application/vnd.rn-realmedia-vbr | - +**.gz** | application/gzip | application/x-gzip, application/x-gunzip, application/gzipped, application/gzip-compressed, application/x-gzip-compressed, gzip/document +**.class** | application/x-java-applet | - +**.swf** | application/x-shockwave-flash | - +**.crx** | application/x-chrome-extension | - +**.ttf** | font/ttf | font/sfnt, application/x-font-ttf, application/font-sfnt +**.woff** | font/woff | - +**.woff2** | font/woff2 | - +**.otf** | font/otf | - +**.ttc** | font/collection | - +**.eot** | application/vnd.ms-fontobject | - +**.wasm** | application/wasm | - +**.shx** | application/vnd.shx | - +**.shp** | application/vnd.shp | - +**.dbf** | application/x-dbf | - +**.dcm** | application/dicom | - +**.rar** | application/x-rar-compressed | application/x-rar +**.djvu** | image/vnd.djvu | - +**.mobi** | application/x-mobipocket-ebook | - +**.lit** | application/x-ms-reader | - +**.bpg** | image/bpg | - +**.cbor** | application/cbor | - +**.sqlite** | 
application/vnd.sqlite3 | application/x-sqlite3 +**.dwg** | image/vnd.dwg | image/x-dwg, application/acad, application/x-acad, application/autocad_dwg, application/dwg, application/x-dwg, application/x-autocad, drawing/dwg +**.nes** | application/vnd.nintendo.snes.rom | - +**.lnk** | application/x-ms-shortcut | - +**.macho** | application/x-mach-binary | - +**.qcp** | audio/qcelp | - +**.icns** | image/x-icns | - +**.hdr** | image/vnd.radiance | - +**.mrc** | application/marc | - +**.mdb** | application/x-msaccess | - +**.accdb** | application/x-msaccess | - +**.zst** | application/zstd | - +**.cab** | application/vnd.ms-cab-compressed | - +**.rpm** | application/x-rpm | - +**.xz** | application/x-xz | - +**.lz** | application/lzip | application/x-lzip +**.torrent** | application/x-bittorrent | - +**.cpio** | application/x-cpio | - +**n/a** | application/tzif | - +**.xcf** | image/x-xcf | - +**.pat** | image/x-gimp-pat | - +**.gbr** | image/x-gimp-gbr | - +**.glb** | model/gltf-binary | - +**.cab** | application/x-installshield | - +**.jxr** | image/jxr | image/vnd.ms-photo +**.parquet** | application/vnd.apache.parquet | application/x-parquet +**.one** | application/onenote | - +**.chm** | application/vnd.ms-htmlhelp | - +**.txt** | text/plain | - +**.svg** | image/svg+xml | - +**.html** | text/html | - +**.xml** | text/xml | application/xml +**.rss** | application/rss+xml | text/rss +**.atom** | application/atom+xml | - +**.x3d** | model/x3d+xml | - +**.kml** | application/vnd.google-earth.kml+xml | - +**.xlf** | application/x-xliff+xml | - +**.dae** | model/vnd.collada+xml | - +**.gml** | application/gml+xml | - +**.gpx** | application/gpx+xml | - +**.tcx** | application/vnd.garmin.tcx+xml | - +**.amf** | application/x-amf | - +**.3mf** | application/vnd.ms-package.3dmanufacturing-3dmodel+xml | - +**.xfdf** | application/vnd.adobe.xfdf | - +**.owl** | application/owl+xml | - +**.html** | application/xhtml+xml | - +**.php** | text/x-php | - +**.js** | 
text/javascript | application/x-javascript, application/javascript +**.lua** | text/x-lua | - +**.pl** | text/x-perl | - +**.py** | text/x-python | text/x-script.python, application/x-python +**.rb** | text/x-ruby | application/x-ruby +**.json** | application/json | - +**.geojson** | application/geo+json | - +**.har** | application/json | - +**.gltf** | model/gltf+json | - +**.ndjson** | application/x-ndjson | - +**.rtf** | text/rtf | application/rtf +**.srt** | application/x-subrip | application/x-srt, text/x-srt +**.tcl** | text/x-tcl | application/x-tcl +**.csv** | text/csv | - +**.tsv** | text/tab-separated-values | - +**.vcf** | text/vcard | - +**.ics** | text/calendar | - +**.warc** | application/warc | - +**.vtt** | text/vtt | - +**.sh** | text/x-shellscript | text/x-sh, application/x-shellscript, application/x-sh +**.pbm** | image/x-portable-bitmap | - +**.pgm** | image/x-portable-graymap | - +**.ppm** | image/x-portable-pixmap | - +**.pam** | image/x-portable-arbitrarymap | - diff --git a/vendor/github.com/gabriel-vasile/mimetype/tree.go b/vendor/github.com/gabriel-vasile/mimetype/tree.go new file mode 100644 index 0000000000..edbde89587 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/tree.go @@ -0,0 +1,289 @@ +package mimetype + +import ( + "sync" + + "github.com/gabriel-vasile/mimetype/internal/magic" +) + +// mimetype stores the list of MIME types in a tree structure with +// "application/octet-stream" at the root of the hierarchy. The hierarchy +// approach minimizes the number of checks that need to be done on the input +// and allows for more precise results once the base type of file has been +// identified. +// +// root is a detector which passes for any slice of bytes. +// When a detector passes the check, the children detectors +// are tried in order to find a more accurate MIME type. 
+var root = newMIME("application/octet-stream", "", + func([]byte, uint32) bool { return true }, + xpm, sevenZ, zip, pdf, fdf, ole, ps, psd, p7s, ogg, png, jpg, jxl, jp2, jpx, + jpm, jxs, gif, webp, exe, elf, ar, tar, xar, bz2, fits, tiff, bmp, lotus, ico, + mp3, flac, midi, ape, musePack, amr, wav, aiff, au, mpeg, quickTime, mp4, webM, + avi, flv, mkv, asf, aac, voc, m3u, rmvb, gzip, class, swf, crx, ttf, woff, + woff2, otf, ttc, eot, wasm, shx, dbf, dcm, rar, djvu, mobi, lit, bpg, cbor, + sqlite3, dwg, nes, lnk, macho, qcp, icns, hdr, mrc, mdb, accdb, zstd, cab, + rpm, xz, lzip, torrent, cpio, tzif, xcf, pat, gbr, glb, cabIS, jxr, parquet, + oneNote, chm, + // Keep text last because it is the slowest check. + text, +) + +// errMIME is returned from Detect functions when err is not nil. +// Detect could return root for erroneous cases, but it needs to lock mu in order to do so. +// errMIME is same as root but it does not require locking. +var errMIME = newMIME("application/octet-stream", "", func([]byte, uint32) bool { return false }) + +// mu guards access to the root MIME tree. Access to root must be synchronized with this lock. +var mu = &sync.RWMutex{} + +// The list of nodes appended to the root node. +var ( + xz = newMIME("application/x-xz", ".xz", magic.Xz) + gzip = newMIME("application/gzip", ".gz", magic.Gzip).alias( + "application/x-gzip", "application/x-gunzip", "application/gzipped", + "application/gzip-compressed", "application/x-gzip-compressed", + "gzip/document") + sevenZ = newMIME("application/x-7z-compressed", ".7z", magic.SevenZ) + // APK must be checked before JAR because APK is a subset of JAR. + // This means APK should be a child of JAR detector, but in practice, + // the decisive signature for JAR might be located at the end of the file + // and not reachable because of library readLimit. + zip = newMIME("application/zip", ".zip", magic.Zip, docx, pptx, xlsx, epub, apk, jar, odt, ods, odp, odg, odf, odc, sxc, kmz, visio). 
+ alias("application/x-zip", "application/x-zip-compressed") + tar = newMIME("application/x-tar", ".tar", magic.Tar) + xar = newMIME("application/x-xar", ".xar", magic.Xar) + bz2 = newMIME("application/x-bzip2", ".bz2", magic.Bz2) + pdf = newMIME("application/pdf", ".pdf", magic.PDF). + alias("application/x-pdf") + fdf = newMIME("application/vnd.fdf", ".fdf", magic.Fdf) + xlsx = newMIME("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", ".xlsx", magic.Xlsx) + docx = newMIME("application/vnd.openxmlformats-officedocument.wordprocessingml.document", ".docx", magic.Docx) + pptx = newMIME("application/vnd.openxmlformats-officedocument.presentationml.presentation", ".pptx", magic.Pptx) + visio = newMIME("application/vnd.ms-visio.drawing.main+xml", ".vsdx", magic.Visio) + epub = newMIME("application/epub+zip", ".epub", magic.Epub) + jar = newMIME("application/java-archive", ".jar", magic.Jar). + alias("application/jar", "application/jar-archive", "application/x-java-archive") + apk = newMIME("application/vnd.android.package-archive", ".apk", magic.APK) + ole = newMIME("application/x-ole-storage", "", magic.Ole, msi, aaf, msg, xls, pub, ppt, doc) + msi = newMIME("application/x-ms-installer", ".msi", magic.Msi). + alias("application/x-windows-installer", "application/x-msi") + aaf = newMIME("application/octet-stream", ".aaf", magic.Aaf) + doc = newMIME("application/msword", ".doc", magic.Doc). + alias("application/vnd.ms-word") + ppt = newMIME("application/vnd.ms-powerpoint", ".ppt", magic.Ppt). + alias("application/mspowerpoint") + pub = newMIME("application/vnd.ms-publisher", ".pub", magic.Pub) + xls = newMIME("application/vnd.ms-excel", ".xls", magic.Xls). 
+ alias("application/msexcel") + msg = newMIME("application/vnd.ms-outlook", ".msg", magic.Msg) + ps = newMIME("application/postscript", ".ps", magic.Ps) + fits = newMIME("application/fits", ".fits", magic.Fits).alias("image/fits") + ogg = newMIME("application/ogg", ".ogg", magic.Ogg, oggAudio, oggVideo). + alias("application/x-ogg") + oggAudio = newMIME("audio/ogg", ".oga", magic.OggAudio) + oggVideo = newMIME("video/ogg", ".ogv", magic.OggVideo) + text = newMIME("text/plain", ".txt", magic.Text, svg, html, xml, php, js, lua, perl, python, ruby, json, ndJSON, rtf, srt, tcl, csv, tsv, vCard, iCalendar, warc, vtt, shell, netpbm, netpgm, netppm, netpam) + xml = newMIME("text/xml", ".xml", magic.XML, rss, atom, x3d, kml, xliff, collada, gml, gpx, tcx, amf, threemf, xfdf, owl2, xhtml). + alias("application/xml") + xhtml = newMIME("application/xhtml+xml", ".html", magic.XHTML) + json = newMIME("application/json", ".json", magic.JSON, geoJSON, har, gltf) + har = newMIME("application/json", ".har", magic.HAR) + csv = newMIME("text/csv", ".csv", magic.CSV) + tsv = newMIME("text/tab-separated-values", ".tsv", magic.TSV) + geoJSON = newMIME("application/geo+json", ".geojson", magic.GeoJSON) + ndJSON = newMIME("application/x-ndjson", ".ndjson", magic.NdJSON) + html = newMIME("text/html", ".html", magic.HTML) + php = newMIME("text/x-php", ".php", magic.Php) + rtf = newMIME("text/rtf", ".rtf", magic.Rtf).alias("application/rtf") + js = newMIME("text/javascript", ".js", magic.Js). + alias("application/x-javascript", "application/javascript") + srt = newMIME("application/x-subrip", ".srt", magic.Srt). + alias("application/x-srt", "text/x-srt") + vtt = newMIME("text/vtt", ".vtt", magic.Vtt) + lua = newMIME("text/x-lua", ".lua", magic.Lua) + perl = newMIME("text/x-perl", ".pl", magic.Perl) + python = newMIME("text/x-python", ".py", magic.Python). + alias("text/x-script.python", "application/x-python") + ruby = newMIME("text/x-ruby", ".rb", magic.Ruby). 
+ alias("application/x-ruby") + shell = newMIME("text/x-shellscript", ".sh", magic.Shell). + alias("text/x-sh", "application/x-shellscript", "application/x-sh") + tcl = newMIME("text/x-tcl", ".tcl", magic.Tcl). + alias("application/x-tcl") + vCard = newMIME("text/vcard", ".vcf", magic.VCard) + iCalendar = newMIME("text/calendar", ".ics", magic.ICalendar) + svg = newMIME("image/svg+xml", ".svg", magic.Svg) + rss = newMIME("application/rss+xml", ".rss", magic.Rss). + alias("text/rss") + owl2 = newMIME("application/owl+xml", ".owl", magic.Owl2) + atom = newMIME("application/atom+xml", ".atom", magic.Atom) + x3d = newMIME("model/x3d+xml", ".x3d", magic.X3d) + kml = newMIME("application/vnd.google-earth.kml+xml", ".kml", magic.Kml) + kmz = newMIME("application/vnd.google-earth.kmz", ".kmz", magic.KMZ) + xliff = newMIME("application/x-xliff+xml", ".xlf", magic.Xliff) + collada = newMIME("model/vnd.collada+xml", ".dae", magic.Collada) + gml = newMIME("application/gml+xml", ".gml", magic.Gml) + gpx = newMIME("application/gpx+xml", ".gpx", magic.Gpx) + tcx = newMIME("application/vnd.garmin.tcx+xml", ".tcx", magic.Tcx) + amf = newMIME("application/x-amf", ".amf", magic.Amf) + threemf = newMIME("application/vnd.ms-package.3dmanufacturing-3dmodel+xml", ".3mf", magic.Threemf) + png = newMIME("image/png", ".png", magic.Png, apng) + apng = newMIME("image/vnd.mozilla.apng", ".png", magic.Apng) + jpg = newMIME("image/jpeg", ".jpg", magic.Jpg) + jxl = newMIME("image/jxl", ".jxl", magic.Jxl) + jp2 = newMIME("image/jp2", ".jp2", magic.Jp2) + jpx = newMIME("image/jpx", ".jpf", magic.Jpx) + jpm = newMIME("image/jpm", ".jpm", magic.Jpm). 
+ alias("video/jpm") + jxs = newMIME("image/jxs", ".jxs", magic.Jxs) + xpm = newMIME("image/x-xpixmap", ".xpm", magic.Xpm) + bpg = newMIME("image/bpg", ".bpg", magic.Bpg) + gif = newMIME("image/gif", ".gif", magic.Gif) + webp = newMIME("image/webp", ".webp", magic.Webp) + tiff = newMIME("image/tiff", ".tiff", magic.Tiff) + bmp = newMIME("image/bmp", ".bmp", magic.Bmp). + alias("image/x-bmp", "image/x-ms-bmp") + // lotus check must be done before ico because some ico detection is a bit + // relaxed and some lotus files are wrongfully identified as ico otherwise. + lotus = newMIME("application/vnd.lotus-1-2-3", ".123", magic.Lotus123) + ico = newMIME("image/x-icon", ".ico", magic.Ico) + icns = newMIME("image/x-icns", ".icns", magic.Icns) + psd = newMIME("image/vnd.adobe.photoshop", ".psd", magic.Psd). + alias("image/x-psd", "application/photoshop") + heic = newMIME("image/heic", ".heic", magic.Heic) + heicSeq = newMIME("image/heic-sequence", ".heic", magic.HeicSequence) + heif = newMIME("image/heif", ".heif", magic.Heif) + heifSeq = newMIME("image/heif-sequence", ".heif", magic.HeifSequence) + hdr = newMIME("image/vnd.radiance", ".hdr", magic.Hdr) + avif = newMIME("image/avif", ".avif", magic.AVIF) + mp3 = newMIME("audio/mpeg", ".mp3", magic.Mp3). + alias("audio/x-mpeg", "audio/mp3") + flac = newMIME("audio/flac", ".flac", magic.Flac) + midi = newMIME("audio/midi", ".midi", magic.Midi). + alias("audio/mid", "audio/sp-midi", "audio/x-mid", "audio/x-midi") + ape = newMIME("audio/ape", ".ape", magic.Ape) + musePack = newMIME("audio/musepack", ".mpc", magic.MusePack) + wav = newMIME("audio/wav", ".wav", magic.Wav). + alias("audio/x-wav", "audio/vnd.wave", "audio/wave") + aiff = newMIME("audio/aiff", ".aiff", magic.Aiff).alias("audio/x-aiff") + au = newMIME("audio/basic", ".au", magic.Au) + amr = newMIME("audio/amr", ".amr", magic.Amr). 
+ alias("audio/amr-nb") + aac = newMIME("audio/aac", ".aac", magic.AAC) + voc = newMIME("audio/x-unknown", ".voc", magic.Voc) + aMp4 = newMIME("audio/mp4", ".mp4", magic.AMp4). + alias("audio/x-mp4a") + m4a = newMIME("audio/x-m4a", ".m4a", magic.M4a) + m3u = newMIME("application/vnd.apple.mpegurl", ".m3u", magic.M3u). + alias("audio/mpegurl") + m4v = newMIME("video/x-m4v", ".m4v", magic.M4v) + mj2 = newMIME("video/mj2", ".mj2", magic.Mj2) + dvb = newMIME("video/vnd.dvb.file", ".dvb", magic.Dvb) + mp4 = newMIME("video/mp4", ".mp4", magic.Mp4, avif, threeGP, threeG2, aMp4, mqv, m4a, m4v, heic, heicSeq, heif, heifSeq, mj2, dvb) + webM = newMIME("video/webm", ".webm", magic.WebM). + alias("audio/webm") + mpeg = newMIME("video/mpeg", ".mpeg", magic.Mpeg) + quickTime = newMIME("video/quicktime", ".mov", magic.QuickTime) + mqv = newMIME("video/quicktime", ".mqv", magic.Mqv) + threeGP = newMIME("video/3gpp", ".3gp", magic.ThreeGP). + alias("video/3gp", "audio/3gpp") + threeG2 = newMIME("video/3gpp2", ".3g2", magic.ThreeG2). + alias("video/3g2", "audio/3gpp2") + avi = newMIME("video/x-msvideo", ".avi", magic.Avi). + alias("video/avi", "video/msvideo") + flv = newMIME("video/x-flv", ".flv", magic.Flv) + mkv = newMIME("video/x-matroska", ".mkv", magic.Mkv) + asf = newMIME("video/x-ms-asf", ".asf", magic.Asf). + alias("video/asf", "video/x-ms-wmv") + rmvb = newMIME("application/vnd.rn-realmedia-vbr", ".rmvb", magic.Rmvb) + class = newMIME("application/x-java-applet", ".class", magic.Class) + swf = newMIME("application/x-shockwave-flash", ".swf", magic.SWF) + crx = newMIME("application/x-chrome-extension", ".crx", magic.CRX) + ttf = newMIME("font/ttf", ".ttf", magic.Ttf). 
+ alias("font/sfnt", "application/x-font-ttf", "application/font-sfnt") + woff = newMIME("font/woff", ".woff", magic.Woff) + woff2 = newMIME("font/woff2", ".woff2", magic.Woff2) + otf = newMIME("font/otf", ".otf", magic.Otf) + ttc = newMIME("font/collection", ".ttc", magic.Ttc) + eot = newMIME("application/vnd.ms-fontobject", ".eot", magic.Eot) + wasm = newMIME("application/wasm", ".wasm", magic.Wasm) + shp = newMIME("application/vnd.shp", ".shp", magic.Shp) + shx = newMIME("application/vnd.shx", ".shx", magic.Shx, shp) + dbf = newMIME("application/x-dbf", ".dbf", magic.Dbf) + exe = newMIME("application/vnd.microsoft.portable-executable", ".exe", magic.Exe) + elf = newMIME("application/x-elf", "", magic.Elf, elfObj, elfExe, elfLib, elfDump) + elfObj = newMIME("application/x-object", "", magic.ElfObj) + elfExe = newMIME("application/x-executable", "", magic.ElfExe) + elfLib = newMIME("application/x-sharedlib", ".so", magic.ElfLib) + elfDump = newMIME("application/x-coredump", "", magic.ElfDump) + ar = newMIME("application/x-archive", ".a", magic.Ar, deb). + alias("application/x-unix-archive") + deb = newMIME("application/vnd.debian.binary-package", ".deb", magic.Deb) + rpm = newMIME("application/x-rpm", ".rpm", magic.RPM) + dcm = newMIME("application/dicom", ".dcm", magic.Dcm) + odt = newMIME("application/vnd.oasis.opendocument.text", ".odt", magic.Odt, ott). + alias("application/x-vnd.oasis.opendocument.text") + ott = newMIME("application/vnd.oasis.opendocument.text-template", ".ott", magic.Ott). + alias("application/x-vnd.oasis.opendocument.text-template") + ods = newMIME("application/vnd.oasis.opendocument.spreadsheet", ".ods", magic.Ods, ots). + alias("application/x-vnd.oasis.opendocument.spreadsheet") + ots = newMIME("application/vnd.oasis.opendocument.spreadsheet-template", ".ots", magic.Ots). + alias("application/x-vnd.oasis.opendocument.spreadsheet-template") + odp = newMIME("application/vnd.oasis.opendocument.presentation", ".odp", magic.Odp, otp). 
+ alias("application/x-vnd.oasis.opendocument.presentation") + otp = newMIME("application/vnd.oasis.opendocument.presentation-template", ".otp", magic.Otp). + alias("application/x-vnd.oasis.opendocument.presentation-template") + odg = newMIME("application/vnd.oasis.opendocument.graphics", ".odg", magic.Odg, otg). + alias("application/x-vnd.oasis.opendocument.graphics") + otg = newMIME("application/vnd.oasis.opendocument.graphics-template", ".otg", magic.Otg). + alias("application/x-vnd.oasis.opendocument.graphics-template") + odf = newMIME("application/vnd.oasis.opendocument.formula", ".odf", magic.Odf). + alias("application/x-vnd.oasis.opendocument.formula") + odc = newMIME("application/vnd.oasis.opendocument.chart", ".odc", magic.Odc). + alias("application/x-vnd.oasis.opendocument.chart") + sxc = newMIME("application/vnd.sun.xml.calc", ".sxc", magic.Sxc) + rar = newMIME("application/x-rar-compressed", ".rar", magic.RAR). + alias("application/x-rar") + djvu = newMIME("image/vnd.djvu", ".djvu", magic.DjVu) + mobi = newMIME("application/x-mobipocket-ebook", ".mobi", magic.Mobi) + lit = newMIME("application/x-ms-reader", ".lit", magic.Lit) + sqlite3 = newMIME("application/vnd.sqlite3", ".sqlite", magic.Sqlite). + alias("application/x-sqlite3") + dwg = newMIME("image/vnd.dwg", ".dwg", magic.Dwg). 
+ alias("image/x-dwg", "application/acad", "application/x-acad", + "application/autocad_dwg", "application/dwg", "application/x-dwg", + "application/x-autocad", "drawing/dwg") + warc = newMIME("application/warc", ".warc", magic.Warc) + nes = newMIME("application/vnd.nintendo.snes.rom", ".nes", magic.Nes) + lnk = newMIME("application/x-ms-shortcut", ".lnk", magic.Lnk) + macho = newMIME("application/x-mach-binary", ".macho", magic.MachO) + qcp = newMIME("audio/qcelp", ".qcp", magic.Qcp) + mrc = newMIME("application/marc", ".mrc", magic.Marc) + mdb = newMIME("application/x-msaccess", ".mdb", magic.MsAccessMdb) + accdb = newMIME("application/x-msaccess", ".accdb", magic.MsAccessAce) + zstd = newMIME("application/zstd", ".zst", magic.Zstd) + cab = newMIME("application/vnd.ms-cab-compressed", ".cab", magic.Cab) + cabIS = newMIME("application/x-installshield", ".cab", magic.InstallShieldCab) + lzip = newMIME("application/lzip", ".lz", magic.Lzip).alias("application/x-lzip") + torrent = newMIME("application/x-bittorrent", ".torrent", magic.Torrent) + cpio = newMIME("application/x-cpio", ".cpio", magic.Cpio) + tzif = newMIME("application/tzif", "", magic.TzIf) + p7s = newMIME("application/pkcs7-signature", ".p7s", magic.P7s) + xcf = newMIME("image/x-xcf", ".xcf", magic.Xcf) + pat = newMIME("image/x-gimp-pat", ".pat", magic.Pat) + gbr = newMIME("image/x-gimp-gbr", ".gbr", magic.Gbr) + xfdf = newMIME("application/vnd.adobe.xfdf", ".xfdf", magic.Xfdf) + glb = newMIME("model/gltf-binary", ".glb", magic.GLB) + gltf = newMIME("model/gltf+json", ".gltf", magic.GLTF) + jxr = newMIME("image/jxr", ".jxr", magic.Jxr).alias("image/vnd.ms-photo") + parquet = newMIME("application/vnd.apache.parquet", ".parquet", magic.Par1). 
+ alias("application/x-parquet") + netpbm = newMIME("image/x-portable-bitmap", ".pbm", magic.NetPBM) + netpgm = newMIME("image/x-portable-graymap", ".pgm", magic.NetPGM) + netppm = newMIME("image/x-portable-pixmap", ".ppm", magic.NetPPM) + netpam = newMIME("image/x-portable-arbitrarymap", ".pam", magic.NetPAM) + cbor = newMIME("application/cbor", ".cbor", magic.CBOR) + oneNote = newMIME("application/onenote", ".one", magic.One) + chm = newMIME("application/vnd.ms-htmlhelp", ".chm", magic.CHM) +) diff --git a/vendor/github.com/go-playground/locales/.gitignore b/vendor/github.com/go-playground/locales/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/go-playground/locales/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/go-playground/locales/.travis.yml b/vendor/github.com/go-playground/locales/.travis.yml new file mode 100644 index 0000000000..d50237a608 --- /dev/null +++ b/vendor/github.com/go-playground/locales/.travis.yml @@ -0,0 +1,26 @@ +language: go +go: + - 1.13.1 + - tip +matrix: + allow_failures: + - go: tip + +notifications: + email: + recipients: dean.karn@gmail.com + on_success: change + on_failure: always + +before_install: + - go install github.com/mattn/goveralls + +# Only clone the most recent commit. +git: + depth: 1 + +script: + - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./... 
+ +after_success: | + goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN \ No newline at end of file diff --git a/vendor/github.com/go-playground/locales/LICENSE b/vendor/github.com/go-playground/locales/LICENSE new file mode 100644 index 0000000000..75854ac4f0 --- /dev/null +++ b/vendor/github.com/go-playground/locales/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/go-playground/locales/README.md b/vendor/github.com/go-playground/locales/README.md new file mode 100644 index 0000000000..7b6be2c647 --- /dev/null +++ b/vendor/github.com/go-playground/locales/README.md @@ -0,0 +1,170 @@ +## locales +![Project status](https://img.shields.io/badge/version-0.14.1-green.svg) +[![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales) +[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales) +![License](https://img.shields.io/dub/l/vibe-d.svg) + +Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within +an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator). + +Features +-------- +- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere ) + +Full Tests +-------------------- +I could sure use your help adding tests for every locale, it is a huge undertaking and I just don't have the free time to do it all at the moment; +any help would be **greatly appreciated!!!!** please see [issue](https://github.com/go-playground/locales/issues/1) for details. 
+ +Installation +----------- + +Use go get + +```shell +go get github.com/go-playground/locales +``` + +NOTES +-------- +You'll notice most return types are []byte, this is because most of the time the results will be concatenated with a larger body +of text and can avoid some allocations if already appending to a byte array, otherwise just cast as string. + +Usage +------- +```go +package main + +import ( + "fmt" + "time" + + "github.com/go-playground/locales/currency" + "github.com/go-playground/locales/en_CA" +) + +func main() { + + loc, _ := time.LoadLocation("America/Toronto") + datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc) + + l := en_CA.New() + + // Dates + fmt.Println(l.FmtDateFull(datetime)) + fmt.Println(l.FmtDateLong(datetime)) + fmt.Println(l.FmtDateMedium(datetime)) + fmt.Println(l.FmtDateShort(datetime)) + + // Times + fmt.Println(l.FmtTimeFull(datetime)) + fmt.Println(l.FmtTimeLong(datetime)) + fmt.Println(l.FmtTimeMedium(datetime)) + fmt.Println(l.FmtTimeShort(datetime)) + + // Months Wide + fmt.Println(l.MonthWide(time.January)) + fmt.Println(l.MonthWide(time.February)) + fmt.Println(l.MonthWide(time.March)) + // ... + + // Months Abbreviated + fmt.Println(l.MonthAbbreviated(time.January)) + fmt.Println(l.MonthAbbreviated(time.February)) + fmt.Println(l.MonthAbbreviated(time.March)) + // ... + + // Months Narrow + fmt.Println(l.MonthNarrow(time.January)) + fmt.Println(l.MonthNarrow(time.February)) + fmt.Println(l.MonthNarrow(time.March)) + // ... + + // Weekdays Wide + fmt.Println(l.WeekdayWide(time.Sunday)) + fmt.Println(l.WeekdayWide(time.Monday)) + fmt.Println(l.WeekdayWide(time.Tuesday)) + // ... + + // Weekdays Abbreviated + fmt.Println(l.WeekdayAbbreviated(time.Sunday)) + fmt.Println(l.WeekdayAbbreviated(time.Monday)) + fmt.Println(l.WeekdayAbbreviated(time.Tuesday)) + // ... + + // Weekdays Short + fmt.Println(l.WeekdayShort(time.Sunday)) + fmt.Println(l.WeekdayShort(time.Monday)) + fmt.Println(l.WeekdayShort(time.Tuesday)) + // ... 
+ + // Weekdays Narrow + fmt.Println(l.WeekdayNarrow(time.Sunday)) + fmt.Println(l.WeekdayNarrow(time.Monday)) + fmt.Println(l.WeekdayNarrow(time.Tuesday)) + // ... + + var f64 float64 + + f64 = -10356.4523 + + // Number + fmt.Println(l.FmtNumber(f64, 2)) + + // Currency + fmt.Println(l.FmtCurrency(f64, 2, currency.CAD)) + fmt.Println(l.FmtCurrency(f64, 2, currency.USD)) + + // Accounting + fmt.Println(l.FmtAccounting(f64, 2, currency.CAD)) + fmt.Println(l.FmtAccounting(f64, 2, currency.USD)) + + f64 = 78.12 + + // Percent + fmt.Println(l.FmtPercent(f64, 0)) + + // Plural Rules for locale, so you know what rules you must cover + fmt.Println(l.PluralsCardinal()) + fmt.Println(l.PluralsOrdinal()) + + // Cardinal Plural Rules + fmt.Println(l.CardinalPluralRule(1, 0)) + fmt.Println(l.CardinalPluralRule(1.0, 0)) + fmt.Println(l.CardinalPluralRule(1.0, 1)) + fmt.Println(l.CardinalPluralRule(3, 0)) + + // Ordinal Plural Rules + fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st + fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd + fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd + fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th + + // Range Plural Rules + fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1 + fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2 + fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8 +} +``` + +NOTES: +------- +These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/), if you encounter any issues +I strongly encourage contributing to the CLDR project to get the locale information corrected and the next time +these locales are regenerated the fix will come with. + +I do however realize that time constraints are often important and so there are two options: + +1. Create your own locale, copy, paste and modify, and ensure it complies with the `Translator` interface. +2. Add an exception in the locale generation code directly and once regenerated, fix will be in place. 
+ +Please to not make fixes inside the locale files, they WILL get overwritten when the locales are regenerated. + +License +------ +Distributed under MIT License, please see license file in code for more details. diff --git a/vendor/github.com/go-playground/locales/currency/currency.go b/vendor/github.com/go-playground/locales/currency/currency.go new file mode 100644 index 0000000000..b5a95fb074 --- /dev/null +++ b/vendor/github.com/go-playground/locales/currency/currency.go @@ -0,0 +1,311 @@ +package currency + +// Type is the currency type associated with the locales currency enum +type Type int + +// locale currencies +const ( + ADP Type = iota + AED + AFA + AFN + ALK + ALL + AMD + ANG + AOA + AOK + AON + AOR + ARA + ARL + ARM + ARP + ARS + ATS + AUD + AWG + AZM + AZN + BAD + BAM + BAN + BBD + BDT + BEC + BEF + BEL + BGL + BGM + BGN + BGO + BHD + BIF + BMD + BND + BOB + BOL + BOP + BOV + BRB + BRC + BRE + BRL + BRN + BRR + BRZ + BSD + BTN + BUK + BWP + BYB + BYN + BYR + BZD + CAD + CDF + CHE + CHF + CHW + CLE + CLF + CLP + CNH + CNX + CNY + COP + COU + CRC + CSD + CSK + CUC + CUP + CVE + CYP + CZK + DDM + DEM + DJF + DKK + DOP + DZD + ECS + ECV + EEK + EGP + ERN + ESA + ESB + ESP + ETB + EUR + FIM + FJD + FKP + FRF + GBP + GEK + GEL + GHC + GHS + GIP + GMD + GNF + GNS + GQE + GRD + GTQ + GWE + GWP + GYD + HKD + HNL + HRD + HRK + HTG + HUF + IDR + IEP + ILP + ILR + ILS + INR + IQD + IRR + ISJ + ISK + ITL + JMD + JOD + JPY + KES + KGS + KHR + KMF + KPW + KRH + KRO + KRW + KWD + KYD + KZT + LAK + LBP + LKR + LRD + LSL + LTL + LTT + LUC + LUF + LUL + LVL + LVR + LYD + MAD + MAF + MCF + MDC + MDL + MGA + MGF + MKD + MKN + MLF + MMK + MNT + MOP + MRO + MRU + MTL + MTP + MUR + MVP + MVR + MWK + MXN + MXP + MXV + MYR + MZE + MZM + MZN + NAD + NGN + NIC + NIO + NLG + NOK + NPR + NZD + OMR + PAB + PEI + PEN + PES + PGK + PHP + PKR + PLN + PLZ + PTE + PYG + QAR + RHD + ROL + RON + RSD + RUB + RUR + RWF + SAR + SBD + SCR + SDD + SDG + SDP + SEK + SGD + SHP + SIT + SKK + 
SLL + SOS + SRD + SRG + SSP + STD + STN + SUR + SVC + SYP + SZL + THB + TJR + TJS + TMM + TMT + TND + TOP + TPE + TRL + TRY + TTD + TWD + TZS + UAH + UAK + UGS + UGX + USD + USN + USS + UYI + UYP + UYU + UYW + UZS + VEB + VEF + VES + VND + VNN + VUV + WST + XAF + XAG + XAU + XBA + XBB + XBC + XBD + XCD + XDR + XEU + XFO + XFU + XOF + XPD + XPF + XPT + XRE + XSU + XTS + XUA + XXX + YDD + YER + YUD + YUM + YUN + YUR + ZAL + ZAR + ZMK + ZMW + ZRN + ZRZ + ZWD + ZWL + ZWR +) diff --git a/vendor/github.com/go-playground/locales/logo.png b/vendor/github.com/go-playground/locales/logo.png new file mode 100644 index 0000000000..3038276e68 Binary files /dev/null and b/vendor/github.com/go-playground/locales/logo.png differ diff --git a/vendor/github.com/go-playground/locales/rules.go b/vendor/github.com/go-playground/locales/rules.go new file mode 100644 index 0000000000..9202900149 --- /dev/null +++ b/vendor/github.com/go-playground/locales/rules.go @@ -0,0 +1,293 @@ +package locales + +import ( + "strconv" + "time" + + "github.com/go-playground/locales/currency" +) + +// // ErrBadNumberValue is returned when the number passed for +// // plural rule determination cannot be parsed +// type ErrBadNumberValue struct { +// NumberValue string +// InnerError error +// } + +// // Error returns ErrBadNumberValue error string +// func (e *ErrBadNumberValue) Error() string { +// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError) +// } + +// var _ error = new(ErrBadNumberValue) + +// PluralRule denotes the type of plural rules +type PluralRule int + +// PluralRule's +const ( + PluralRuleUnknown PluralRule = iota + PluralRuleZero // zero + PluralRuleOne // one - singular + PluralRuleTwo // two - dual + PluralRuleFew // few - paucal + PluralRuleMany // many - also used for fractions if they have a separate class + PluralRuleOther // other - required—general plural form—also used if the language only has a single form +) + +const ( + pluralsString = 
"UnknownZeroOneTwoFewManyOther" +) + +// Translator encapsulates an instance of a locale +// NOTE: some values are returned as a []byte just in case the caller +// wishes to add more and can help avoid allocations; otherwise just cast as string +type Translator interface { + + // The following Functions are for overriding, debugging or developing + // with a Translator Locale + + // Locale returns the string value of the translator + Locale() string + + // returns an array of cardinal plural rules associated + // with this translator + PluralsCardinal() []PluralRule + + // returns an array of ordinal plural rules associated + // with this translator + PluralsOrdinal() []PluralRule + + // returns an array of range plural rules associated + // with this translator + PluralsRange() []PluralRule + + // returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale + CardinalPluralRule(num float64, v uint64) PluralRule + + // returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale + OrdinalPluralRule(num float64, v uint64) PluralRule + + // returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale + RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule + + // returns the locales abbreviated month given the 'month' provided + MonthAbbreviated(month time.Month) string + + // returns the locales abbreviated months + MonthsAbbreviated() []string + + // returns the locales narrow month given the 'month' provided + MonthNarrow(month time.Month) string + + // returns the locales narrow months + MonthsNarrow() []string + + // returns the locales wide month given the 'month' provided + MonthWide(month time.Month) string + + // returns the locales wide months + MonthsWide() []string + + // returns the locales abbreviated weekday given the 'weekday' provided + WeekdayAbbreviated(weekday time.Weekday) string + + // returns the locales abbreviated weekdays + 
WeekdaysAbbreviated() []string + + // returns the locales narrow weekday given the 'weekday' provided + WeekdayNarrow(weekday time.Weekday) string + + // WeekdaysNarrowreturns the locales narrow weekdays + WeekdaysNarrow() []string + + // returns the locales short weekday given the 'weekday' provided + WeekdayShort(weekday time.Weekday) string + + // returns the locales short weekdays + WeekdaysShort() []string + + // returns the locales wide weekday given the 'weekday' provided + WeekdayWide(weekday time.Weekday) string + + // returns the locales wide weekdays + WeekdaysWide() []string + + // The following Functions are common Formatting functionsfor the Translator's Locale + + // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v' + FmtNumber(num float64, v uint64) string + + // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v' + // NOTE: 'num' passed into FmtPercent is assumed to be in percent already + FmtPercent(num float64, v uint64) string + + // returns the currency representation of 'num' with digits/precision of 'v' for locale + FmtCurrency(num float64, v uint64, currency currency.Type) string + + // returns the currency representation of 'num' with digits/precision of 'v' for locale + // in accounting notation. 
+ FmtAccounting(num float64, v uint64, currency currency.Type) string + + // returns the short date representation of 't' for locale + FmtDateShort(t time.Time) string + + // returns the medium date representation of 't' for locale + FmtDateMedium(t time.Time) string + + // returns the long date representation of 't' for locale + FmtDateLong(t time.Time) string + + // returns the full date representation of 't' for locale + FmtDateFull(t time.Time) string + + // returns the short time representation of 't' for locale + FmtTimeShort(t time.Time) string + + // returns the medium time representation of 't' for locale + FmtTimeMedium(t time.Time) string + + // returns the long time representation of 't' for locale + FmtTimeLong(t time.Time) string + + // returns the full time representation of 't' for locale + FmtTimeFull(t time.Time) string +} + +// String returns the string value of PluralRule +func (p PluralRule) String() string { + + switch p { + case PluralRuleZero: + return pluralsString[7:11] + case PluralRuleOne: + return pluralsString[11:14] + case PluralRuleTwo: + return pluralsString[14:17] + case PluralRuleFew: + return pluralsString[17:20] + case PluralRuleMany: + return pluralsString[20:24] + case PluralRuleOther: + return pluralsString[24:] + default: + return pluralsString[:7] + } +} + +// +// Precision Notes: +// +// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh +// +// v := float64(3.141) +// i := float64(int64(v)) +// +// fmt.Println(v - i) +// +// or +// +// s := strconv.FormatFloat(v-i, 'f', -1, 64) +// fmt.Println(s) +// +// these will not print what you'd expect: 0.14100000000000001 +// and so this library requires a precision to be specified, or +// inaccurate plural rules could be applied. +// +// +// +// n - absolute value of the source number (integer and decimals). +// i - integer digits of n. +// v - number of visible fraction digits in n, with trailing zeros. 
+// w - number of visible fraction digits in n, without trailing zeros. +// f - visible fractional digits in n, with trailing zeros. +// t - visible fractional digits in n, without trailing zeros. +// +// +// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision note's above. +// +// n := math.Abs(num) +// i := int64(n) +// v := v +// +// +// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on string until no more zero's.... +// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after decimal into an int64 +// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on string until no more zero's.... +// +// +// +// General Inclusion Rules +// - v will always be available inherently +// - all require n +// - w requires i +// + +// W returns the number of visible fraction digits in N, without trailing zeros. +func W(n float64, v uint64) (w int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then w will be zero + // otherwise need to parse + if len(s) != 1 { + + s = s[2:] + end := len(s) + 1 + + for i := end; i >= 0; i-- { + if s[i] != '0' { + end = i + 1 + break + } + } + + w = int64(len(s[:end])) + } + + return +} + +// F returns the visible fractional digits in N, with trailing zeros. +func F(n float64, v uint64) (f int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then f will be zero + // otherwise need to parse + if len(s) != 1 { + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + f, _ = strconv.ParseInt(s[2:], 10, 64) + } + + return +} + +// T returns the visible fractional digits in N, without trailing zeros. 
+func T(n float64, v uint64) (t int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then t will be zero + // otherwise need to parse + if len(s) != 1 { + + s = s[2:] + end := len(s) + 1 + + for i := end; i >= 0; i-- { + if s[i] != '0' { + end = i + 1 + break + } + } + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + t, _ = strconv.ParseInt(s[:end], 10, 64) + } + + return +} diff --git a/vendor/github.com/go-playground/universal-translator/.gitignore b/vendor/github.com/go-playground/universal-translator/.gitignore new file mode 100644 index 0000000000..bc4e07f34e --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.coverprofile \ No newline at end of file diff --git a/vendor/github.com/go-playground/universal-translator/.travis.yml b/vendor/github.com/go-playground/universal-translator/.travis.yml new file mode 100644 index 0000000000..39b8b923e4 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/.travis.yml @@ -0,0 +1,27 @@ +language: go +go: + - 1.13.4 + - tip +matrix: + allow_failures: + - go: tip + +notifications: + email: + recipients: dean.karn@gmail.com + on_success: change + on_failure: always + +before_install: + - go install github.com/mattn/goveralls + +# Only clone the most recent commit. +git: + depth: 1 + +script: + - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./... 
+ +after_success: | + [ $TRAVIS_GO_VERSION = 1.13.4 ] && + goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN \ No newline at end of file diff --git a/vendor/github.com/go-playground/universal-translator/LICENSE b/vendor/github.com/go-playground/universal-translator/LICENSE new file mode 100644 index 0000000000..8d8aba15ba --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/go-playground/universal-translator/Makefile b/vendor/github.com/go-playground/universal-translator/Makefile new file mode 100644 index 0000000000..ec3455bd59 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/Makefile @@ -0,0 +1,18 @@ +GOCMD=GO111MODULE=on go + +linters-install: + @golangci-lint --version >/dev/null 2>&1 || { \ + echo "installing linting tools..."; \ + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.41.1; \ + } + +lint: linters-install + golangci-lint run + +test: + $(GOCMD) test -cover -race ./... + +bench: + $(GOCMD) test -bench=. -benchmem ./... + +.PHONY: test lint linters-install \ No newline at end of file diff --git a/vendor/github.com/go-playground/universal-translator/README.md b/vendor/github.com/go-playground/universal-translator/README.md new file mode 100644 index 0000000000..d9b6654741 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/README.md @@ -0,0 +1,87 @@ +## universal-translator +![Project status](https://img.shields.io/badge/version-0.18.1-green.svg) +[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator) +[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator) +![License](https://img.shields.io/dub/l/vibe-d.svg) + +Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules + +Why another i18n library? 
+-------------------------- +Because none of the plural rules seem to be correct out there, including the previous implementation of this package, +so I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package +is a thin wrapper around [locales](https://github.com/go-playground/locales) in order to store and translate text for +use in your applications. + +Features +-------- +- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere ) +- [x] Support loading translations from files +- [x] Exporting translations to file(s), mainly for getting them professionally translated +- [ ] Code Generation for translation files -> Go code.. i.e. 
after it has been professionally translated +- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1) + +Installation +----------- + +Use go get + +```shell +go get github.com/go-playground/universal-translator +``` + +Usage & Documentation +------- + +Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs + +##### Examples: + +- [Basic](https://github.com/go-playground/universal-translator/tree/master/_examples/basic) +- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-no-files) +- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-with-files) + +File formatting +-------------- +All types, Plain substitution, Cardinal, Ordinal and Range translations can all be contained within the same file(s); +they are only separated for easy viewing. + +##### Examples: + +- [Formats](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats) + +##### Basic Makeup +NOTE: not all fields are needed for all translation types, see [examples](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats) +```json +{ + "locale": "en", + "key": "days-left", + "trans": "You have {0} day left.", + "type": "Cardinal", + "rule": "One", + "override": false +} +``` +|Field|Description| +|---|---| +|locale|The locale for which the translation is for.| +|key|The translation key that will be used to store and lookup each translation; normally it is a string or integer.| +|trans|The actual translation text.| +|type|The type of translation Cardinal, Ordinal, Range or "" for a plain substitution(not required to be defined if plain used)| +|rule|The plural rule for which the translation is for eg. 
One, Two, Few, Many or Other.(not required to be defined if plain used)| +|override|If you wish to override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.| + +Help With Tests +--------------- +To anyone interesting in helping or contributing, I sure could use some help creating tests for each language. +Please see issue [here](https://github.com/go-playground/locales/issues/1) for details. + +License +------ +Distributed under MIT License, please see license file in code for more details. diff --git a/vendor/github.com/go-playground/universal-translator/errors.go b/vendor/github.com/go-playground/universal-translator/errors.go new file mode 100644 index 0000000000..38b163b626 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/errors.go @@ -0,0 +1,148 @@ +package ut + +import ( + "errors" + "fmt" + + "github.com/go-playground/locales" +) + +var ( + // ErrUnknowTranslation indicates the translation could not be found + ErrUnknowTranslation = errors.New("Unknown Translation") +) + +var _ error = new(ErrConflictingTranslation) +var _ error = new(ErrRangeTranslation) +var _ error = new(ErrOrdinalTranslation) +var _ error = new(ErrCardinalTranslation) +var _ error = new(ErrMissingPluralTranslation) +var _ error = new(ErrExistingTranslator) + +// ErrExistingTranslator is the error representing a conflicting translator +type ErrExistingTranslator struct { + locale string +} + +// Error returns ErrExistingTranslator's internal error text +func (e *ErrExistingTranslator) Error() string { + return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale) +} + +// ErrConflictingTranslation is the error representing a conflicting translation +type ErrConflictingTranslation struct { + locale string + key interface{} + rule locales.PluralRule + text string +} + +// Error returns ErrConflictingTranslation's internal error text +func (e *ErrConflictingTranslation) 
Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) + } + + return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) +} + +// ErrRangeTranslation is the error representing a range translation error +type ErrRangeTranslation struct { + text string +} + +// Error returns ErrRangeTranslation's internal error text +func (e *ErrRangeTranslation) Error() string { + return e.text +} + +// ErrOrdinalTranslation is the error representing an ordinal translation error +type ErrOrdinalTranslation struct { + text string +} + +// Error returns ErrOrdinalTranslation's internal error text +func (e *ErrOrdinalTranslation) Error() string { + return e.text +} + +// ErrCardinalTranslation is the error representing a cardinal translation error +type ErrCardinalTranslation struct { + text string +} + +// Error returns ErrCardinalTranslation's internal error text +func (e *ErrCardinalTranslation) Error() string { + return e.text +} + +// ErrMissingPluralTranslation is the error signifying a missing translation given +// the locales plural rules. +type ErrMissingPluralTranslation struct { + locale string + key interface{} + rule locales.PluralRule + translationType string +} + +// Error returns ErrMissingPluralTranslation's internal error text +func (e *ErrMissingPluralTranslation) Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale) + } + + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale) +} + +// ErrMissingBracket is the error representing a missing bracket in a translation +// eg. 
This is a {0 <-- missing ending '}' +type ErrMissingBracket struct { + locale string + key interface{} + text string +} + +// Error returns ErrMissingBracket error message +func (e *ErrMissingBracket) Error() string { + return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text) +} + +// ErrBadParamSyntax is the error representing a bad parameter definition in a translation +// eg. This is a {must-be-int} +type ErrBadParamSyntax struct { + locale string + param string + key interface{} + text string +} + +// Error returns ErrBadParamSyntax error message +func (e *ErrBadParamSyntax) Error() string { + return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text) +} + +// import/export errors + +// ErrMissingLocale is the error representing an expected locale that could +// not be found aka locale not registered with the UniversalTranslator Instance +type ErrMissingLocale struct { + locale string +} + +// Error returns ErrMissingLocale's internal error text +func (e *ErrMissingLocale) Error() string { + return fmt.Sprintf("error: locale '%s' not registered.", e.locale) +} + +// ErrBadPluralDefinition is the error representing an incorrect plural definition +// usually found within translations defined within files during the import process. 
+type ErrBadPluralDefinition struct { + tl translation +} + +// Error returns ErrBadPluralDefinition's internal error text +func (e *ErrBadPluralDefinition) Error() string { + return fmt.Sprintf("error: bad plural definition '%#v'", e.tl) +} diff --git a/vendor/github.com/go-playground/universal-translator/import_export.go b/vendor/github.com/go-playground/universal-translator/import_export.go new file mode 100644 index 0000000000..87a1b465cb --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/import_export.go @@ -0,0 +1,274 @@ +package ut + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + + "io" + + "github.com/go-playground/locales" +) + +type translation struct { + Locale string `json:"locale"` + Key interface{} `json:"key"` // either string or integer + Translation string `json:"trans"` + PluralType string `json:"type,omitempty"` + PluralRule string `json:"rule,omitempty"` + OverrideExisting bool `json:"override,omitempty"` +} + +const ( + cardinalType = "Cardinal" + ordinalType = "Ordinal" + rangeType = "Range" +) + +// ImportExportFormat is the format of the file import or export +type ImportExportFormat uint8 + +// supported Export Formats +const ( + FormatJSON ImportExportFormat = iota +) + +// Export writes the translations out to a file on disk. +// +// NOTE: this currently only works with string or int translations keys. 
+func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error { + + _, err := os.Stat(dirname) + if err != nil { + + if !os.IsNotExist(err) { + return err + } + + if err = os.MkdirAll(dirname, 0744); err != nil { + return err + } + } + + // build up translations + var trans []translation + var b []byte + var ext string + + for _, locale := range t.translators { + + for k, v := range locale.(*translator).translations { + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k, + Translation: v.text, + }) + } + + for k, pluralTrans := range locale.(*translator).cardinalTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: cardinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).ordinalTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: ordinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).rangeTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. 
+ if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: rangeType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + switch format { + case FormatJSON: + b, err = json.MarshalIndent(trans, "", " ") + ext = ".json" + } + + if err != nil { + return err + } + + err = os.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644) + if err != nil { + return err + } + + trans = trans[0:0] + } + + return nil +} + +// Import reads the translations out of a file or directory on disk. +// +// NOTE: this currently only works with string or int translations keys. +func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error { + + fi, err := os.Stat(dirnameOrFilename) + if err != nil { + return err + } + + processFn := func(filename string) error { + + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + + return t.ImportByReader(format, f) + } + + if !fi.IsDir() { + return processFn(dirnameOrFilename) + } + + // recursively go through directory + walker := func(path string, info os.FileInfo, err error) error { + + if info.IsDir() { + return nil + } + + switch format { + case FormatJSON: + // skip non JSON files + if filepath.Ext(info.Name()) != ".json" { + return nil + } + } + + return processFn(path) + } + + return filepath.Walk(dirnameOrFilename, walker) +} + +// ImportByReader imports the the translations found within the contents read from the supplied reader. +// +// NOTE: generally used when assets have been embedded into the binary and are already in memory. 
+func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error { + + b, err := io.ReadAll(reader) + if err != nil { + return err + } + + var trans []translation + + switch format { + case FormatJSON: + err = json.Unmarshal(b, &trans) + } + + if err != nil { + return err + } + + for _, tl := range trans { + + locale, found := t.FindTranslator(tl.Locale) + if !found { + return &ErrMissingLocale{locale: tl.Locale} + } + + pr := stringToPR(tl.PluralRule) + + if pr == locales.PluralRuleUnknown { + + err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting) + if err != nil { + return err + } + + continue + } + + switch tl.PluralType { + case cardinalType: + err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case ordinalType: + err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case rangeType: + err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting) + default: + return &ErrBadPluralDefinition{tl: tl} + } + + if err != nil { + return err + } + } + + return nil +} + +func stringToPR(s string) locales.PluralRule { + + switch s { + case "Zero": + return locales.PluralRuleZero + case "One": + return locales.PluralRuleOne + case "Two": + return locales.PluralRuleTwo + case "Few": + return locales.PluralRuleFew + case "Many": + return locales.PluralRuleMany + case "Other": + return locales.PluralRuleOther + default: + return locales.PluralRuleUnknown + } + +} diff --git a/vendor/github.com/go-playground/universal-translator/logo.png b/vendor/github.com/go-playground/universal-translator/logo.png new file mode 100644 index 0000000000..a37aa8c0cd Binary files /dev/null and b/vendor/github.com/go-playground/universal-translator/logo.png differ diff --git a/vendor/github.com/go-playground/universal-translator/translator.go b/vendor/github.com/go-playground/universal-translator/translator.go new file mode 100644 index 0000000000..24b18db92a --- /dev/null +++ 
b/vendor/github.com/go-playground/universal-translator/translator.go @@ -0,0 +1,420 @@ +package ut + +import ( + "fmt" + "strconv" + "strings" + + "github.com/go-playground/locales" +) + +const ( + paramZero = "{0}" + paramOne = "{1}" + unknownTranslation = "" +) + +// Translator is universal translators +// translator instance which is a thin wrapper +// around locales.Translator instance providing +// some extra functionality +type Translator interface { + locales.Translator + + // adds a normal translation for a particular language/locale + // {#} is the only replacement type accepted and are ad infinitum + // eg. one: '{0} day left' other: '{0} days left' + Add(key interface{}, text string, override bool) error + + // adds a cardinal plural translation for a particular language/locale + // {0} is the only replacement type accepted and only one variable is accepted as + // multiple cannot be used for a plural rule determination, unless it is a range; + // see AddRange below. + // eg. in locale 'en' one: '{0} day left' other: '{0} days left' + AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error + + // adds an ordinal plural translation for a particular language/locale + // {0} is the only replacement type accepted and only one variable is accepted as + // multiple cannot be used for a plural rule determination, unless it is a range; + // see AddRange below. + // eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' + // - 1st, 2nd, 3rd... + AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error + + // adds a range plural translation for a particular language/locale + // {0} and {1} are the only replacement types accepted and only these are accepted. + // eg. 
in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left' + AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error + + // creates the translation for the locale given the 'key' and params passed in + T(key interface{}, params ...string) (string, error) + + // creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments + // and param passed in + C(key interface{}, num float64, digits uint64, param string) (string, error) + + // creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments + // and param passed in + O(key interface{}, num float64, digits uint64, param string) (string, error) + + // creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and + // 'digit2' arguments and 'param1' and 'param2' passed in + R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) + + // VerifyTranslations checks to ensures that no plural rules have been + // missed within the translations. 
+ VerifyTranslations() error +} + +var _ Translator = new(translator) +var _ locales.Translator = new(translator) + +type translator struct { + locales.Translator + translations map[interface{}]*transText + cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown + ordinalTanslations map[interface{}][]*transText + rangeTanslations map[interface{}][]*transText +} + +type transText struct { + text string + indexes []int +} + +func newTranslator(trans locales.Translator) Translator { + return &translator{ + Translator: trans, + translations: make(map[interface{}]*transText), // translation text broken up by byte index + cardinalTanslations: make(map[interface{}][]*transText), + ordinalTanslations: make(map[interface{}][]*transText), + rangeTanslations: make(map[interface{}][]*transText), + } +} + +// Add adds a normal translation for a particular language/locale +// {#} is the only replacement type accepted and are ad infinitum +// eg. 
one: '{0} day left' other: '{0} days left' +func (t *translator) Add(key interface{}, text string, override bool) error { + + if _, ok := t.translations[key]; ok && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text} + } + + lb := strings.Count(text, "{") + rb := strings.Count(text, "}") + + if lb != rb { + return &ErrMissingBracket{locale: t.Locale(), key: key, text: text} + } + + trans := &transText{ + text: text, + } + + var idx int + + for i := 0; i < lb; i++ { + s := "{" + strconv.Itoa(i) + "}" + idx = strings.Index(text, s) + if idx == -1 { + return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text} + } + + trans.indexes = append(trans.indexes, idx) + trans.indexes = append(trans.indexes, idx+len(s)) + } + + t.translations[key] = trans + + return nil +} + +// AddCardinal adds a cardinal plural translation for a particular language/locale +// {0} is the only replacement type accepted and only one variable is accepted as +// multiple cannot be used for a plural rule determination, unless it is a range; +// see AddRange below. +// eg. 
in locale 'en' one: '{0} day left' other: '{0} days left' +func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsCardinal() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.cardinalTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7) + t.cardinalTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 2), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + return nil +} + +// AddOrdinal adds an ordinal plural translation for a particular language/locale +// {0} is the only replacement type accepted and only one variable is accepted as +// multiple cannot be used for a plural rule determination, unless it is a range; +// see AddRange below. +// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd... 
+func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsOrdinal() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.ordinalTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7) + t.ordinalTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 2), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + return nil +} + +// AddRange adds a range plural translation for a particular language/locale +// {0} and {1} are the only replacement types accepted and only these are accepted. +// eg. 
in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left' +func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsRange() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.rangeTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7) + t.rangeTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 4), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + idx = strings.Index(text, paramOne) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. 
locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)} + } + + trans.indexes[2] = idx + trans.indexes[3] = idx + len(paramOne) + + return nil +} + +// T creates the translation for the locale given the 'key' and params passed in +func (t *translator) T(key interface{}, params ...string) (string, error) { + + trans, ok := t.translations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + b := make([]byte, 0, 64) + + var start, end, count int + + for i := 0; i < len(trans.indexes); i++ { + end = trans.indexes[i] + b = append(b, trans.text[start:end]...) + b = append(b, params[count]...) + i++ + start = trans.indexes[i] + count++ + } + + b = append(b, trans.text[start:]...) + + return string(b), nil +} + +// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.cardinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.CardinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) + + return string(b), nil +} + +// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.ordinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.OrdinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) 
+ + return string(b), nil +} + +// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments +// and 'param1' and 'param2' passed in +func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) { + + tarr, ok := t.rangeTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.RangePluralRule(num1, digits1, num2, digits2) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param1...) + b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...) + b = append(b, param2...) + b = append(b, trans.text[trans.indexes[3]:]...) + + return string(b), nil +} + +// VerifyTranslations checks to ensures that no plural rules have been +// missed within the translations. +func (t *translator) VerifyTranslations() error { + + for k, v := range t.cardinalTanslations { + + for _, rule := range t.PluralsCardinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k} + } + } + } + + for k, v := range t.ordinalTanslations { + + for _, rule := range t.PluralsOrdinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k} + } + } + } + + for k, v := range t.rangeTanslations { + + for _, rule := range t.PluralsRange() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k} + } + } + } + + return nil +} diff --git a/vendor/github.com/go-playground/universal-translator/universal_translator.go b/vendor/github.com/go-playground/universal-translator/universal_translator.go new file mode 100644 index 0000000000..dbf707f5c7 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/universal_translator.go @@ -0,0 +1,113 
@@ +package ut + +import ( + "strings" + + "github.com/go-playground/locales" +) + +// UniversalTranslator holds all locale & translation data +type UniversalTranslator struct { + translators map[string]Translator + fallback Translator +} + +// New returns a new UniversalTranslator instance set with +// the fallback locale and locales it should support +func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator { + + t := &UniversalTranslator{ + translators: make(map[string]Translator), + } + + for _, v := range supportedLocales { + + trans := newTranslator(v) + t.translators[strings.ToLower(trans.Locale())] = trans + + if fallback.Locale() == v.Locale() { + t.fallback = trans + } + } + + if t.fallback == nil && fallback != nil { + t.fallback = newTranslator(fallback) + } + + return t +} + +// FindTranslator tries to find a Translator based on an array of locales +// and returns the first one it can find, otherwise returns the +// fallback translator. 
+func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) { + + for _, locale := range locales { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + } + + return t.fallback, false +} + +// GetTranslator returns the specified translator for the given locale, +// or fallback if not found +func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + + return t.fallback, false +} + +// GetFallback returns the fallback locale +func (t *UniversalTranslator) GetFallback() Translator { + return t.fallback +} + +// AddTranslator adds the supplied translator, if it already exists the override param +// will be checked and if false an error will be returned, otherwise the translator will be +// overridden; if the fallback matches the supplied translator it will be overridden as well +// NOTE: this is normally only used when translator is embedded within a library +func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error { + + lc := strings.ToLower(translator.Locale()) + _, ok := t.translators[lc] + if ok && !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + trans := newTranslator(translator) + + if t.fallback.Locale() == translator.Locale() { + + // because it's optional to have a fallback, I don't impose that limitation + // don't know why you wouldn't but... + if !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + t.fallback = trans + } + + t.translators[lc] = trans + + return nil +} + +// VerifyTranslations runs through all locales and identifies any issues +// eg. 
missing plural rules for a locale +func (t *UniversalTranslator) VerifyTranslations() (err error) { + + for _, trans := range t.translators { + err = trans.VerifyTranslations() + if err != nil { + return + } + } + + return +} diff --git a/vendor/github.com/go-playground/validator/v10/.gitignore b/vendor/github.com/go-playground/validator/v10/.gitignore new file mode 100644 index 0000000000..6305e52900 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +bin + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.test +*.out +*.txt +/**/*.DS_Store +cover.html +README.html +.idea diff --git a/vendor/github.com/go-playground/validator/v10/.golangci.yaml b/vendor/github.com/go-playground/validator/v10/.golangci.yaml new file mode 100644 index 0000000000..dd9c05cc8b --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/.golangci.yaml @@ -0,0 +1,54 @@ +version: "2" +linters: + default: all + disable: + - noinlineerr + - wsl_v5 + - copyloopvar + - cyclop + - depguard + - dogsled + - dupl + - dupword + - err113 + - errorlint + - exhaustive + - exhaustruct + - forbidigo + - forcetypeassert + - funlen + - gochecknoglobals + - gocognit + - goconst + - gocritic + - gocyclo + - godot + - gosec + - gosmopolitan + - interfacebloat + - intrange + - ireturn + - lll + - maintidx + - misspell + - mnd + - nakedret + - nestif + - nilnil + - nlreturn + - nonamedreturns + - paralleltest + - perfsprint + - prealloc + - recvcheck + - revive + - staticcheck + - tagalign + - tagliatelle + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - wrapcheck + - wsl diff --git a/vendor/github.com/go-playground/validator/v10/LICENSE b/vendor/github.com/go-playground/validator/v10/LICENSE new file 
mode 100644 index 0000000000..6a2ae9aa4d --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Dean Karn + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md b/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md new file mode 100644 index 0000000000..b809c4ce12 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md @@ -0,0 +1,16 @@ +## Maintainers Guide + +### Semantic Versioning +Semantic versioning as defined [here](https://semver.org) must be strictly adhered to. + +### External Dependencies +Any new external dependencies MUST: +- Have a compatible LICENSE present. +- Be actively maintained. +- Be approved by @go-playground/admins + +### PR Merge Requirements +- Up-to-date branch. +- Passing tests and linting. +- CODEOWNERS approval. +- Tests that cover both the Happy and Unhappy paths. 
\ No newline at end of file diff --git a/vendor/github.com/go-playground/validator/v10/Makefile b/vendor/github.com/go-playground/validator/v10/Makefile new file mode 100644 index 0000000000..e7caab7f12 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/Makefile @@ -0,0 +1,18 @@ +GOCMD=go + +linters-install: + @golangci-lint --version >/dev/null 2>&1 || { \ + echo "installing linting tools..."; \ + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v2.0.2; \ + } + +lint: linters-install + golangci-lint run + +test: + $(GOCMD) test -cover -race ./... + +bench: + $(GOCMD) test -run=NONE -bench=. -benchmem ./... + +.PHONY: test lint linters-install diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md new file mode 100644 index 0000000000..cb5d419459 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -0,0 +1,384 @@ +Package validator +================= +[![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/go-playground/validator)](https://github.com/go-playground/validator/releases) +[![Build Status](https://github.com/go-playground/validator/actions/workflows/workflow.yml/badge.svg)](https://github.com/go-playground/validator/actions) +[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) +[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://pkg.go.dev/github.com/go-playground/validator/v10) +![License](https://img.shields.io/dub/l/vibe-d.svg) + +Package validator implements value validations for structs and individual fields based on tags. 
+ +It has the following **unique** features: + +- Cross Field and Cross Struct validations by using validation tags or custom validators. +- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated. +- Ability to dive into both map keys and values for validation +- Handles type interface by determining its underlying type prior to validation. +- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29) +- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs +- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError +- Customizable i18n aware error messages. +- Default validator for the [gin](https://github.com/gin-gonic/gin) web framework; upgrading from v8 to v9 in gin see [here](https://github.com/go-playground/validator/tree/master/_examples/gin-upgrading-overriding) + +A Call for Maintainers +---------------------- + +Please read the discussion started [here](https://github.com/go-playground/validator/discussions/1330) if you are interested in contributing/helping maintain this package. + +Installation +------------ + +Use go get. + + go get github.com/go-playground/validator/v10 + +Then import the validator package into your own code. 
+ + import "github.com/go-playground/validator/v10" + +Error Return Value +------- + +Validation functions return type error + +They return type error to avoid the issue discussed in the following, where err is always != nil: + +* http://stackoverflow.com/a/29138676/3158232 +* https://github.com/go-playground/validator/issues/134 + +Validator returns only InvalidValidationError for bad validation input, nil or ValidationErrors as type error; so, in your code all you need to do is check if the error returned is not nil, and if it's not check if error is InvalidValidationError ( if necessary, most of the time it isn't ) type cast it to type ValidationErrors like so: + +```go +err := validate.Struct(mystruct) +validationErrors := err.(validator.ValidationErrors) + ``` + +Usage and documentation +------ + +Please see https://pkg.go.dev/github.com/go-playground/validator/v10 for detailed usage docs. + +##### Examples: + +- [Simple](https://github.com/go-playground/validator/blob/master/_examples/simple/main.go) +- [Custom Field Types](https://github.com/go-playground/validator/blob/master/_examples/custom/main.go) +- [Struct Level](https://github.com/go-playground/validator/blob/master/_examples/struct-level/main.go) +- [Translations & Custom Errors](https://github.com/go-playground/validator/blob/master/_examples/translations/main.go) +- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding) +- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash) + +Baked-in Validations +------ + +### Special Notes: +- If new to using validator it is highly recommended to initialize it using the `WithRequiredStructEnabled` option which is opt-in to new behaviour that will become the default behaviour in v11+. See documentation for more details. 
+```go +validate := validator.New(validator.WithRequiredStructEnabled()) +``` + +### Fields: + +| Tag | Description | +| - | - | +| eqcsfield | Field Equals Another Field (relative)| +| eqfield | Field Equals Another Field | +| fieldcontains | Check the indicated characters are present in the Field | +| fieldexcludes | Check the indicated characters are not present in the field | +| gtcsfield | Field Greater Than Another Relative Field | +| gtecsfield | Field Greater Than or Equal To Another Relative Field | +| gtefield | Field Greater Than or Equal To Another Field | +| gtfield | Field Greater Than Another Field | +| ltcsfield | Less Than Another Relative Field | +| ltecsfield | Less Than or Equal To Another Relative Field | +| ltefield | Less Than or Equal To Another Field | +| ltfield | Less Than Another Field | +| necsfield | Field Does Not Equal Another Field (relative) | +| nefield | Field Does Not Equal Another Field | + +### Network: + +| Tag | Description | +| - | - | +| cidr | Classless Inter-Domain Routing CIDR | +| cidrv4 | Classless Inter-Domain Routing CIDRv4 | +| cidrv6 | Classless Inter-Domain Routing CIDRv6 | +| datauri | Data URL | +| fqdn | Full Qualified Domain Name (FQDN) | +| hostname | Hostname RFC 952 | +| hostname_rfc1123 | Hostname RFC 1123 | +| hostname_port | HostPort | +| port | Port number | +| ip | Internet Protocol Address IP | +| ip4_addr | Internet Protocol Address IPv4 | +| ip6_addr | Internet Protocol Address IPv6 | +| ip_addr | Internet Protocol Address IP | +| ipv4 | Internet Protocol Address IPv4 | +| ipv6 | Internet Protocol Address IPv6 | +| mac | Media Access Control Address MAC | +| tcp4_addr | Transmission Control Protocol Address TCPv4 | +| tcp6_addr | Transmission Control Protocol Address TCPv6 | +| tcp_addr | Transmission Control Protocol Address TCP | +| udp4_addr | User Datagram Protocol Address UDPv4 | +| udp6_addr | User Datagram Protocol Address UDPv6 | +| udp_addr | User Datagram Protocol Address UDP | +| 
unix_addr | Unix domain socket end point Address | +| uri | URI String | +| url | URL String | +| http_url | HTTP(s) URL String | +| https_url | HTTPS-only URL String | +| url_encoded | URL Encoded | +| urn_rfc2141 | Urn RFC 2141 String | + +### Strings: + +| Tag | Description | +| - | - | +| alpha | Alpha Only | +| alphaspace | Alpha Space | +| alphanum | Alphanumeric | +| alphanumunicode | Alphanumeric Unicode | +| alphaunicode | Alpha Unicode | +| ascii | ASCII | +| boolean | Boolean | +| contains | Contains | +| containsany | Contains Any | +| containsrune | Contains Rune | +| endsnotwith | Ends Not With | +| endswith | Ends With | +| excludes | Excludes | +| excludesall | Excludes All | +| excludesrune | Excludes Rune | +| lowercase | Lowercase | +| multibyte | Multi-Byte Characters | +| number | Number | +| numeric | Numeric | +| printascii | Printable ASCII | +| startsnotwith | Starts Not With | +| startswith | Starts With | +| uppercase | Uppercase | + +### Format: +| Tag | Description | +| - | - | +| base64 | Base64 String | +| base64url | Base64URL String | +| base64rawurl | Base64RawURL String | +| bic | Business Identifier Code (ISO 9362) | +| bcp47_language_tag | Language tag (BCP 47) | +| btc_addr | Bitcoin Address | +| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) | +| credit_card | Credit Card Number | +| mongodb | MongoDB ObjectID | +| mongodb_connection_string | MongoDB Connection String | +| cron | Cron | +| spicedb | SpiceDb ObjectID/Permission/Type | +| datetime | Datetime | +| e164 | e164 formatted phone number | +| ein | U.S. 
Employer Identification Number | +| email | E-mail String +| eth_addr | Ethereum Address | +| hexadecimal | Hexadecimal String | +| hexcolor | Hexcolor String | +| hsl | HSL String | +| hsla | HSLA String | +| html | HTML Tags | +| html_encoded | HTML Encoded | +| isbn | International Standard Book Number | +| isbn10 | International Standard Book Number 10 | +| isbn13 | International Standard Book Number 13 | +| issn | International Standard Serial Number | +| iso3166_1_alpha2 | Two-letter country code (ISO 3166-1 alpha-2) | +| iso3166_1_alpha3 | Three-letter country code (ISO 3166-1 alpha-3) | +| iso3166_1_alpha_numeric | Numeric country code (ISO 3166-1 numeric) | +| iso3166_2 | Country subdivision code (ISO 3166-2) | +| iso4217 | Currency code (ISO 4217) | +| json | JSON | +| jwt | JSON Web Token (JWT) | +| latitude | Latitude | +| longitude | Longitude | +| luhn_checksum | Luhn Algorithm Checksum (for strings and (u)int) | +| postcode_iso3166_alpha2 | Postcode | +| postcode_iso3166_alpha2_field | Postcode | +| rgb | RGB String | +| rgba | RGBA String | +| ssn | Social Security Number SSN | +| timezone | Timezone | +| uuid | Universally Unique Identifier UUID | +| uuid3 | Universally Unique Identifier UUID v3 | +| uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 | +| uuid4 | Universally Unique Identifier UUID v4 | +| uuid4_rfc4122 | Universally Unique Identifier UUID v4 RFC4122 | +| uuid5 | Universally Unique Identifier UUID v5 | +| uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 | +| uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 | +| md4 | MD4 hash | +| md5 | MD5 hash | +| sha256 | SHA256 hash | +| sha384 | SHA384 hash | +| sha512 | SHA512 hash | +| ripemd128 | RIPEMD-128 hash | +| ripemd160 | RIPEMD-160 hash | +| tiger128 | TIGER128 hash | +| tiger160 | TIGER160 hash | +| tiger192 | TIGER192 hash | +| semver | Semantic Versioning 2.0.0 | +| ulid | Universally Unique Lexicographically Sortable Identifier ULID | +| cve | 
Common Vulnerabilities and Exposures Identifier (CVE id) | + +### Comparisons: +| Tag | Description | +| - | - | +| eq | Equals | +| eq_ignore_case | Equals ignoring case | +| gt | Greater than| +| gte | Greater than or equal | +| lt | Less Than | +| lte | Less Than or Equal | +| ne | Not Equal | +| ne_ignore_case | Not Equal ignoring case | + +### Other: +| Tag | Description | +| - | - | +| dir | Existing Directory | +| dirpath | Directory Path | +| file | Existing File | +| filepath | File Path | +| image | Image | +| isdefault | Is Default | +| len | Length | +| max | Maximum | +| min | Minimum | +| oneof | One Of | +| required | Required | +| required_if | Required If | +| required_unless | Required Unless | +| required_with | Required With | +| required_with_all | Required With All | +| required_without | Required Without | +| required_without_all | Required Without All | +| excluded_if | Excluded If | +| excluded_unless | Excluded Unless | +| excluded_with | Excluded With | +| excluded_with_all | Excluded With All | +| excluded_without | Excluded Without | +| excluded_without_all | Excluded Without All | +| unique | Unique | +| validateFn | Verify if the method `Validate() error` does not return an error (or any specified method) | + + +#### Aliases: +| Tag | Description | +| - | - | +| iscolor | hexcolor\|rgb\|rgba\|hsl\|hsla | +| country_code | iso3166_1_alpha2\|iso3166_1_alpha3\|iso3166_1_alpha_numeric | + +Benchmarks +------ +###### Run on MacBook Pro Max M3 +```go +go version go1.23.3 darwin/arm64 +goos: darwin +goarch: arm64 +cpu: Apple M3 Max +pkg: github.com/go-playground/validator/v10 +BenchmarkFieldSuccess-16 42461943 27.88 ns/op 0 B/op 0 allocs/op +BenchmarkFieldSuccessParallel-16 486632887 2.289 ns/op 0 B/op 0 allocs/op +BenchmarkFieldFailure-16 9566167 121.3 ns/op 200 B/op 4 allocs/op +BenchmarkFieldFailureParallel-16 17551471 83.68 ns/op 200 B/op 4 allocs/op +BenchmarkFieldArrayDiveSuccess-16 7602306 155.6 ns/op 97 B/op 5 allocs/op 
+BenchmarkFieldArrayDiveSuccessParallel-16 20664610 59.80 ns/op 97 B/op 5 allocs/op +BenchmarkFieldArrayDiveFailure-16 4659756 252.9 ns/op 301 B/op 10 allocs/op +BenchmarkFieldArrayDiveFailureParallel-16 8010116 152.9 ns/op 301 B/op 10 allocs/op +BenchmarkFieldMapDiveSuccess-16 2834575 421.2 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveSuccessParallel-16 7179700 171.8 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveFailure-16 3081728 384.4 ns/op 376 B/op 13 allocs/op +BenchmarkFieldMapDiveFailureParallel-16 6058137 204.0 ns/op 377 B/op 13 allocs/op +BenchmarkFieldMapDiveWithKeysSuccess-16 2544975 464.8 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveWithKeysSuccessParallel-16 6661954 181.4 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveWithKeysFailure-16 2435484 490.7 ns/op 553 B/op 16 allocs/op +BenchmarkFieldMapDiveWithKeysFailureParallel-16 4249617 282.0 ns/op 554 B/op 16 allocs/op +BenchmarkFieldCustomTypeSuccess-16 14943525 77.35 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeSuccessParallel-16 64051954 20.61 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeFailure-16 10721384 107.1 ns/op 184 B/op 3 allocs/op +BenchmarkFieldCustomTypeFailureParallel-16 18714495 69.77 ns/op 184 B/op 3 allocs/op +BenchmarkFieldOrTagSuccess-16 4063124 294.3 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagSuccessParallel-16 31903756 41.22 ns/op 18 B/op 1 allocs/op +BenchmarkFieldOrTagFailure-16 7748558 146.8 ns/op 216 B/op 5 allocs/op +BenchmarkFieldOrTagFailureParallel-16 13139854 92.05 ns/op 216 B/op 5 allocs/op +BenchmarkStructLevelValidationSuccess-16 16808389 70.25 ns/op 16 B/op 1 allocs/op +BenchmarkStructLevelValidationSuccessParallel-16 90686955 14.47 ns/op 16 B/op 1 allocs/op +BenchmarkStructLevelValidationFailure-16 5818791 200.2 ns/op 264 B/op 7 allocs/op +BenchmarkStructLevelValidationFailureParallel-16 11115874 107.5 ns/op 264 B/op 7 allocs/op +BenchmarkStructSimpleCustomTypeSuccess-16 7764956 151.9 ns/op 32 B/op 2 allocs/op 
+BenchmarkStructSimpleCustomTypeSuccessParallel-16 52316265 30.37 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeFailure-16 4195429 277.2 ns/op 416 B/op 9 allocs/op +BenchmarkStructSimpleCustomTypeFailureParallel-16 7305661 164.6 ns/op 432 B/op 10 allocs/op +BenchmarkStructFilteredSuccess-16 6312625 186.1 ns/op 216 B/op 5 allocs/op +BenchmarkStructFilteredSuccessParallel-16 13684459 93.42 ns/op 216 B/op 5 allocs/op +BenchmarkStructFilteredFailure-16 6751482 171.2 ns/op 216 B/op 5 allocs/op +BenchmarkStructFilteredFailureParallel-16 14146070 86.93 ns/op 216 B/op 5 allocs/op +BenchmarkStructPartialSuccess-16 6544448 177.3 ns/op 224 B/op 4 allocs/op +BenchmarkStructPartialSuccessParallel-16 13951946 88.73 ns/op 224 B/op 4 allocs/op +BenchmarkStructPartialFailure-16 4075833 287.5 ns/op 440 B/op 9 allocs/op +BenchmarkStructPartialFailureParallel-16 7490805 161.3 ns/op 440 B/op 9 allocs/op +BenchmarkStructExceptSuccess-16 4107187 281.4 ns/op 424 B/op 8 allocs/op +BenchmarkStructExceptSuccessParallel-16 15979173 80.86 ns/op 208 B/op 3 allocs/op +BenchmarkStructExceptFailure-16 4434372 264.3 ns/op 424 B/op 8 allocs/op +BenchmarkStructExceptFailureParallel-16 8081367 154.1 ns/op 424 B/op 8 allocs/op +BenchmarkStructSimpleCrossFieldSuccess-16 6459542 183.4 ns/op 56 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldSuccessParallel-16 41013781 37.95 ns/op 56 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldFailure-16 4034998 292.1 ns/op 272 B/op 8 allocs/op +BenchmarkStructSimpleCrossFieldFailureParallel-16 11348446 115.3 ns/op 272 B/op 8 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccess-16 4448528 267.7 ns/op 64 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-16 26813619 48.33 ns/op 64 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailure-16 3090646 384.5 ns/op 288 B/op 9 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-16 9870906 129.5 ns/op 288 B/op 9 allocs/op 
+BenchmarkStructSimpleSuccess-16 10675562 109.5 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleSuccessParallel-16 131159784 8.932 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleFailure-16 4094979 286.6 ns/op 416 B/op 9 allocs/op +BenchmarkStructSimpleFailureParallel-16 7606663 157.9 ns/op 416 B/op 9 allocs/op +BenchmarkStructComplexSuccess-16 2073470 576.0 ns/op 224 B/op 5 allocs/op +BenchmarkStructComplexSuccessParallel-16 7821831 161.3 ns/op 224 B/op 5 allocs/op +BenchmarkStructComplexFailure-16 576358 2001 ns/op 3042 B/op 48 allocs/op +BenchmarkStructComplexFailureParallel-16 1000000 1171 ns/op 3041 B/op 48 allocs/op +BenchmarkOneof-16 22503973 52.82 ns/op 0 B/op 0 allocs/op +BenchmarkOneofParallel-16 8538474 140.4 ns/op 0 B/op 0 allocs/op +``` + +Complementary Software +---------------------- + +Here is a list of software that complements using this library either pre or post validation. + +* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support. +* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects + +How to Contribute +------ + +Make a pull request... + +Maintenance and support for SDK major versions +---------------------------------------------- + +See prior discussion [here](https://github.com/go-playground/validator/discussions/1342) for more details. + +This package is aligned with the [Go release policy](https://go.dev/doc/devel/release) in that support is guaranteed for +the two most recent major versions. + +This does not mean the package will not work with older versions of Go, only that we reserve the right to increase the +MSGV(Minimum Supported Go Version) when the need arises to address Security issues/patches, OS issues & support or newly +introduced functionality that would greatly benefit the maintenance and/or usage of this package. 
+ +If and when the MSGV is increased it will be done so in a minimum of a `Minor` release bump. + +License +------- +Distributed under MIT License, please see license file within the code for more details. + +Maintainers +----------- +This project has grown large enough that more than one person is required to properly support the community. +If you are interested in becoming a maintainer please reach out to me https://github.com/deankarn diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go new file mode 100644 index 0000000000..8fd55e77ec --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -0,0 +1,3148 @@ +package validator + +import ( + "bytes" + "cmp" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io/fs" + "net" + "net/mail" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unicode/utf8" + + "golang.org/x/crypto/sha3" + "golang.org/x/text/language" + + "github.com/gabriel-vasile/mimetype" + urn "github.com/leodido/go-urn" +) + +// Func accepts a FieldLevel interface for all validation needs. The return +// value should be true when validation succeeds. +type Func func(fl FieldLevel) bool + +// FuncCtx accepts a context.Context and FieldLevel interface for all +// validation needs. The return value should be true when validation succeeds. +type FuncCtx func(ctx context.Context, fl FieldLevel) bool + +// wrapFunc wraps normal Func makes it compatible with FuncCtx +func wrapFunc(fn Func) FuncCtx { + if fn == nil { + return nil // be sure not to wrap a bad function. 
+ } + return func(ctx context.Context, fl FieldLevel) bool { + return fn(fl) + } +} + +var ( + restrictedTags = map[string]struct{}{ + diveTag: {}, + keysTag: {}, + endKeysTag: {}, + structOnlyTag: {}, + omitzero: {}, + omitempty: {}, + omitnil: {}, + skipValidationTag: {}, + utf8HexComma: {}, + utf8Pipe: {}, + noStructLevelTag: {}, + requiredTag: {}, + isdefault: {}, + } + + // bakedInAliases is a default mapping of a single validation tag that + // defines a common or complex set of validation(s) to simplify + // adding validation to structs. + bakedInAliases = map[string]string{ + "iscolor": "hexcolor|rgb|rgba|hsl|hsla", + "country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric", + "eu_country_code": "iso3166_1_alpha2_eu|iso3166_1_alpha3_eu|iso3166_1_alpha_numeric_eu", + } + + // bakedInValidators is the default map of ValidationFunc + // you can add, remove or even replace items to suite your needs, + // or even disregard and use your own map if so desired. + bakedInValidators = map[string]Func{ + "required": hasValue, + "required_if": requiredIf, + "required_unless": requiredUnless, + "skip_unless": skipUnless, + "required_with": requiredWith, + "required_with_all": requiredWithAll, + "required_without": requiredWithout, + "required_without_all": requiredWithoutAll, + "excluded_if": excludedIf, + "excluded_unless": excludedUnless, + "excluded_with": excludedWith, + "excluded_with_all": excludedWithAll, + "excluded_without": excludedWithout, + "excluded_without_all": excludedWithoutAll, + "isdefault": isDefault, + "len": hasLengthOf, + "min": hasMinOf, + "max": hasMaxOf, + "eq": isEq, + "eq_ignore_case": isEqIgnoreCase, + "ne": isNe, + "ne_ignore_case": isNeIgnoreCase, + "lt": isLt, + "lte": isLte, + "gt": isGt, + "gte": isGte, + "eqfield": isEqField, + "eqcsfield": isEqCrossStructField, + "necsfield": isNeCrossStructField, + "gtcsfield": isGtCrossStructField, + "gtecsfield": isGteCrossStructField, + "ltcsfield": isLtCrossStructField, + 
"ltecsfield": isLteCrossStructField, + "nefield": isNeField, + "gtefield": isGteField, + "gtfield": isGtField, + "ltefield": isLteField, + "ltfield": isLtField, + "fieldcontains": fieldContains, + "fieldexcludes": fieldExcludes, + "alpha": isAlpha, + "alphaspace": isAlphaSpace, + "alphanum": isAlphanum, + "alphaunicode": isAlphaUnicode, + "alphanumunicode": isAlphanumUnicode, + "boolean": isBoolean, + "numeric": isNumeric, + "number": isNumber, + "hexadecimal": isHexadecimal, + "hexcolor": isHEXColor, + "rgb": isRGB, + "rgba": isRGBA, + "hsl": isHSL, + "hsla": isHSLA, + "e164": isE164, + "email": isEmail, + "url": isURL, + "http_url": isHttpURL, + "https_url": isHttpsURL, + "uri": isURI, + "urn_rfc2141": isUrnRFC2141, // RFC 2141 + "file": isFile, + "filepath": isFilePath, + "base32": isBase32, + "base64": isBase64, + "base64url": isBase64URL, + "base64rawurl": isBase64RawURL, + "contains": contains, + "containsany": containsAny, + "containsrune": containsRune, + "excludes": excludes, + "excludesall": excludesAll, + "excludesrune": excludesRune, + "startswith": startsWith, + "endswith": endsWith, + "startsnotwith": startsNotWith, + "endsnotwith": endsNotWith, + "image": isImage, + "isbn": isISBN, + "isbn10": isISBN10, + "isbn13": isISBN13, + "issn": isISSN, + "eth_addr": isEthereumAddress, + "eth_addr_checksum": isEthereumAddressChecksum, + "btc_addr": isBitcoinAddress, + "btc_addr_bech32": isBitcoinBech32Address, + "uuid": isUUID, + "uuid3": isUUID3, + "uuid4": isUUID4, + "uuid5": isUUID5, + "uuid_rfc4122": isUUIDRFC4122, + "uuid3_rfc4122": isUUID3RFC4122, + "uuid4_rfc4122": isUUID4RFC4122, + "uuid5_rfc4122": isUUID5RFC4122, + "ulid": isULID, + "md4": isMD4, + "md5": isMD5, + "sha256": isSHA256, + "sha384": isSHA384, + "sha512": isSHA512, + "ripemd128": isRIPEMD128, + "ripemd160": isRIPEMD160, + "tiger128": isTIGER128, + "tiger160": isTIGER160, + "tiger192": isTIGER192, + "ascii": isASCII, + "printascii": isPrintableASCII, + "multibyte": hasMultiByteCharacter, + 
"datauri": isDataURI, + "latitude": isLatitude, + "longitude": isLongitude, + "ssn": isSSN, + "ipv4": isIPv4, + "ipv6": isIPv6, + "ip": isIP, + "cidrv4": isCIDRv4, + "cidrv6": isCIDRv6, + "cidr": isCIDR, + "tcp4_addr": isTCP4AddrResolvable, + "tcp6_addr": isTCP6AddrResolvable, + "tcp_addr": isTCPAddrResolvable, + "udp4_addr": isUDP4AddrResolvable, + "udp6_addr": isUDP6AddrResolvable, + "udp_addr": isUDPAddrResolvable, + "ip4_addr": isIP4AddrResolvable, + "ip6_addr": isIP6AddrResolvable, + "ip_addr": isIPAddrResolvable, + "unix_addr": isUnixAddrResolvable, + "mac": isMAC, + "hostname": isHostnameRFC952, // RFC 952 + "hostname_rfc1123": isHostnameRFC1123, // RFC 1123 + "fqdn": isFQDN, + "unique": isUnique, + "oneof": isOneOf, + "oneofci": isOneOfCI, + "html": isHTML, + "html_encoded": isHTMLEncoded, + "url_encoded": isURLEncoded, + "dir": isDir, + "dirpath": isDirPath, + "json": isJSON, + "jwt": isJWT, + "hostname_port": isHostnamePort, + "port": isPort, + "lowercase": isLowercase, + "uppercase": isUppercase, + "datetime": isDatetime, + "timezone": isTimeZone, + "iso3166_1_alpha2": isIso3166Alpha2, + "iso3166_1_alpha2_eu": isIso3166Alpha2EU, + "iso3166_1_alpha3": isIso3166Alpha3, + "iso3166_1_alpha3_eu": isIso3166Alpha3EU, + "iso3166_1_alpha_numeric": isIso3166AlphaNumeric, + "iso3166_1_alpha_numeric_eu": isIso3166AlphaNumericEU, + "iso3166_2": isIso31662, + "iso4217": isIso4217, + "iso4217_numeric": isIso4217Numeric, + "bcp47_language_tag": isBCP47LanguageTag, + "postcode_iso3166_alpha2": isPostcodeByIso3166Alpha2, + "postcode_iso3166_alpha2_field": isPostcodeByIso3166Alpha2Field, + "bic": isIsoBicFormat, + "semver": isSemverFormat, + "dns_rfc1035_label": isDnsRFC1035LabelFormat, + "credit_card": isCreditCard, + "cve": isCveFormat, + "luhn_checksum": hasLuhnChecksum, + "mongodb": isMongoDBObjectId, + "mongodb_connection_string": isMongoDBConnectionString, + "cron": isCron, + "spicedb": isSpiceDB, + "ein": isEIN, + "validateFn": isValidateFn, + } +) + +var ( + 
oneofValsCache = map[string][]string{} + oneofValsCacheRWLock = sync.RWMutex{} +) + +func parseOneOfParam2(s string) []string { + oneofValsCacheRWLock.RLock() + vals, ok := oneofValsCache[s] + oneofValsCacheRWLock.RUnlock() + if !ok { + oneofValsCacheRWLock.Lock() + vals = splitParamsRegex().FindAllString(s, -1) + for i := 0; i < len(vals); i++ { + vals[i] = strings.ReplaceAll(vals[i], "'", "") + } + oneofValsCache[s] = vals + oneofValsCacheRWLock.Unlock() + } + return vals +} + +func isURLEncoded(fl FieldLevel) bool { + return uRLEncodedRegex().MatchString(fl.Field().String()) +} + +func isHTMLEncoded(fl FieldLevel) bool { + return hTMLEncodedRegex().MatchString(fl.Field().String()) +} + +func isHTML(fl FieldLevel) bool { + return hTMLRegex().MatchString(fl.Field().String()) +} + +func isOneOf(fl FieldLevel) bool { + vals := parseOneOfParam2(fl.Param()) + + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + default: + panic(fmt.Sprintf("Bad field type %s", field.Type())) + } + for i := 0; i < len(vals); i++ { + if vals[i] == v { + return true + } + } + return false +} + +// isOneOfCI is the validation function for validating if the current field's value is one of the provided string values (case insensitive). 
+func isOneOfCI(fl FieldLevel) bool { + vals := parseOneOfParam2(fl.Param()) + field := fl.Field() + + if field.Kind() != reflect.String { + panic(fmt.Sprintf("Bad field type %s", field.Type())) + } + v := field.String() + for _, val := range vals { + if strings.EqualFold(val, v) { + return true + } + } + return false +} + +// isUnique is the validation function for validating if each array|slice|map value is unique +func isUnique(fl FieldLevel) bool { + field := fl.Field() + param := fl.Param() + v := reflect.ValueOf(struct{}{}) + + switch field.Kind() { + case reflect.Slice, reflect.Array: + elem := field.Type().Elem() + if elem.Kind() == reflect.Ptr { + elem = elem.Elem() + } + + if param == "" { + m := reflect.MakeMap(reflect.MapOf(elem, v.Type())) + + for i := 0; i < field.Len(); i++ { + m.SetMapIndex(reflect.Indirect(field.Index(i)), v) + } + return field.Len() == m.Len() + } + + sf, ok := elem.FieldByName(param) + if !ok { + panic(fmt.Sprintf("Bad field name %s", param)) + } + + sfTyp := sf.Type + if sfTyp.Kind() == reflect.Ptr { + sfTyp = sfTyp.Elem() + } + + m := reflect.MakeMap(reflect.MapOf(sfTyp, v.Type())) + var fieldlen int + for i := 0; i < field.Len(); i++ { + key := reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param)) + if key.IsValid() { + fieldlen++ + m.SetMapIndex(key, v) + } + } + return fieldlen == m.Len() + case reflect.Map: + var m reflect.Value + if field.Type().Elem().Kind() == reflect.Ptr { + m = reflect.MakeMap(reflect.MapOf(field.Type().Elem().Elem(), v.Type())) + } else { + m = reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type())) + } + + for _, k := range field.MapKeys() { + m.SetMapIndex(reflect.Indirect(field.MapIndex(k)), v) + } + + return field.Len() == m.Len() + default: + if parent := fl.Parent(); parent.Kind() == reflect.Struct { + uniqueField := parent.FieldByName(param) + if uniqueField == reflect.ValueOf(nil) { + panic(fmt.Sprintf("Bad field name provided %s", param)) + } + + if uniqueField.Kind() != 
field.Kind() { + panic(fmt.Sprintf("Bad field type %s:%s", field.Type(), uniqueField.Type())) + } + + return getValue(field) != getValue(uniqueField) + } + + panic(fmt.Sprintf("Bad field type %s", field.Type())) + } +} + +// isMAC is the validation function for validating if the field's value is a valid MAC address. +func isMAC(fl FieldLevel) bool { + _, err := net.ParseMAC(fl.Field().String()) + + return err == nil +} + +// isCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address. +func isCIDRv4(fl FieldLevel) bool { + ip, net, err := net.ParseCIDR(fl.Field().String()) + + return err == nil && ip.To4() != nil && net.IP.Equal(ip) +} + +// isCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address. +func isCIDRv6(fl FieldLevel) bool { + ip, _, err := net.ParseCIDR(fl.Field().String()) + + return err == nil && ip.To4() == nil +} + +// isCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address. +func isCIDR(fl FieldLevel) bool { + _, _, err := net.ParseCIDR(fl.Field().String()) + + return err == nil +} + +// isIPv4 is the validation function for validating if a value is a valid v4 IP address. +func isIPv4(fl FieldLevel) bool { + ip := net.ParseIP(fl.Field().String()) + + return ip != nil && ip.To4() != nil +} + +// isIPv6 is the validation function for validating if the field's value is a valid v6 IP address. +func isIPv6(fl FieldLevel) bool { + ip := net.ParseIP(fl.Field().String()) + + return ip != nil && ip.To4() == nil +} + +// isIP is the validation function for validating if the field's value is a valid v4 or v6 IP address. +func isIP(fl FieldLevel) bool { + ip := net.ParseIP(fl.Field().String()) + + return ip != nil +} + +// isSSN is the validation function for validating if the field's value is a valid SSN. 
+func isSSN(fl FieldLevel) bool { + field := fl.Field() + + if field.Len() != 11 { + return false + } + + return sSNRegex().MatchString(field.String()) +} + +// isLongitude is the validation function for validating if the field's value is a valid longitude coordinate. +func isLongitude(fl FieldLevel) bool { + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + case reflect.Float32: + v = strconv.FormatFloat(field.Float(), 'f', -1, 32) + case reflect.Float64: + v = strconv.FormatFloat(field.Float(), 'f', -1, 64) + default: + panic(fmt.Sprintf("Bad field type %s", field.Type())) + } + + return longitudeRegex().MatchString(v) +} + +// isLatitude is the validation function for validating if the field's value is a valid latitude coordinate. +func isLatitude(fl FieldLevel) bool { + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + case reflect.Float32: + v = strconv.FormatFloat(field.Float(), 'f', -1, 32) + case reflect.Float64: + v = strconv.FormatFloat(field.Float(), 'f', -1, 64) + default: + panic(fmt.Sprintf("Bad field type %s", field.Type())) + } + + return latitudeRegex().MatchString(v) +} + +// isDataURI is the validation function for validating if the field's value is a valid data URI. 
+func isDataURI(fl FieldLevel) bool { + uri := strings.SplitN(fl.Field().String(), ",", 2) + + if len(uri) != 2 { + return false + } + + if !dataURIRegex().MatchString(uri[0]) { + return false + } + + return base64Regex().MatchString(uri[1]) +} + +// hasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. +func hasMultiByteCharacter(fl FieldLevel) bool { + field := fl.Field() + + if field.Len() == 0 { + return true + } + + return multibyteRegex().MatchString(field.String()) +} + +// isPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. +func isPrintableASCII(fl FieldLevel) bool { + return printableASCIIRegex().MatchString(fl.Field().String()) +} + +// isASCII is the validation function for validating if the field's value is a valid ASCII character. +func isASCII(fl FieldLevel) bool { + return aSCIIRegex().MatchString(fl.Field().String()) +} + +// isUUID5 is the validation function for validating if the field's value is a valid v5 UUID. +func isUUID5(fl FieldLevel) bool { + return fieldMatchesRegexByStringerValOrString(uUID5Regex, fl) +} + +// isUUID4 is the validation function for validating if the field's value is a valid v4 UUID. +func isUUID4(fl FieldLevel) bool { + return fieldMatchesRegexByStringerValOrString(uUID4Regex, fl) +} + +// isUUID3 is the validation function for validating if the field's value is a valid v3 UUID. +func isUUID3(fl FieldLevel) bool { + return fieldMatchesRegexByStringerValOrString(uUID3Regex, fl) +} + +// isUUID is the validation function for validating if the field's value is a valid UUID of any version. +func isUUID(fl FieldLevel) bool { + return fieldMatchesRegexByStringerValOrString(uUIDRegex, fl) +} + +// isUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID. 
+func isUUID5RFC4122(fl FieldLevel) bool { + return fieldMatchesRegexByStringerValOrString(uUID5RFC4122Regex, fl) +} + +// isUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID. +func isUUID4RFC4122(fl FieldLevel) bool { + return fieldMatchesRegexByStringerValOrString(uUID4RFC4122Regex, fl) +} + +// isUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID. +func isUUID3RFC4122(fl FieldLevel) bool { + return fieldMatchesRegexByStringerValOrString(uUID3RFC4122Regex, fl) +} + +// isUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version. +func isUUIDRFC4122(fl FieldLevel) bool { + return fieldMatchesRegexByStringerValOrString(uUIDRFC4122Regex, fl) +} + +// isULID is the validation function for validating if the field's value is a valid ULID. +func isULID(fl FieldLevel) bool { + return fieldMatchesRegexByStringerValOrString(uLIDRegex, fl) +} + +// isMD4 is the validation function for validating if the field's value is a valid MD4. +func isMD4(fl FieldLevel) bool { + return md4Regex().MatchString(fl.Field().String()) +} + +// isMD5 is the validation function for validating if the field's value is a valid MD5. +func isMD5(fl FieldLevel) bool { + return md5Regex().MatchString(fl.Field().String()) +} + +// isSHA256 is the validation function for validating if the field's value is a valid SHA256. +func isSHA256(fl FieldLevel) bool { + return sha256Regex().MatchString(fl.Field().String()) +} + +// isSHA384 is the validation function for validating if the field's value is a valid SHA384. +func isSHA384(fl FieldLevel) bool { + return sha384Regex().MatchString(fl.Field().String()) +} + +// isSHA512 is the validation function for validating if the field's value is a valid SHA512. 
+func isSHA512(fl FieldLevel) bool { + return sha512Regex().MatchString(fl.Field().String()) +} + +// isRIPEMD128 is the validation function for validating if the field's value is a valid PIPEMD128. +func isRIPEMD128(fl FieldLevel) bool { + return ripemd128Regex().MatchString(fl.Field().String()) +} + +// isRIPEMD160 is the validation function for validating if the field's value is a valid PIPEMD160. +func isRIPEMD160(fl FieldLevel) bool { + return ripemd160Regex().MatchString(fl.Field().String()) +} + +// isTIGER128 is the validation function for validating if the field's value is a valid TIGER128. +func isTIGER128(fl FieldLevel) bool { + return tiger128Regex().MatchString(fl.Field().String()) +} + +// isTIGER160 is the validation function for validating if the field's value is a valid TIGER160. +func isTIGER160(fl FieldLevel) bool { + return tiger160Regex().MatchString(fl.Field().String()) +} + +// isTIGER192 is the validation function for validating if the field's value is a valid isTIGER192. +func isTIGER192(fl FieldLevel) bool { + return tiger192Regex().MatchString(fl.Field().String()) +} + +// isISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. +func isISBN(fl FieldLevel) bool { + return isISBN10(fl) || isISBN13(fl) +} + +// isISBN13 is the validation function for validating if the field's value is a valid v13 ISBN. +func isISBN13(fl FieldLevel) bool { + s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4) + + if !iSBN13Regex().MatchString(s) { + return false + } + + var checksum int32 + var i int32 + + factor := []int32{1, 3} + + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(s[i]-'0') + } + + return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0 +} + +// isISBN10 is the validation function for validating if the field's value is a valid v10 ISBN. 
func isISBN10(fl FieldLevel) bool {
	// Strip up to 3 hyphens/spaces so grouped input like "0-306-40615-2" validates.
	s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3)

	if !iSBN10Regex().MatchString(s) {
		return false
	}

	var checksum int32
	var i int32

	// ISBN-10 check: digit i (1-based) is weighted by its position.
	for i = 0; i < 9; i++ {
		checksum += (i + 1) * int32(s[i]-'0')
	}

	// The check "digit" may be 'X', which stands for the value 10.
	if s[9] == 'X' {
		checksum += 10 * 10
	} else {
		checksum += 10 * int32(s[9]-'0')
	}

	return checksum%11 == 0
}

// isISSN is the validation function for validating if the field's value is a valid ISSN.
func isISSN(fl FieldLevel) bool {
	s := fl.Field().String()

	if !iSSNRegex().MatchString(s) {
		return false
	}
	s = strings.ReplaceAll(s, "-", "")

	// ISSN check: digits are weighted 8 down to 2, plus the check digit ('X' = 10).
	pos := 8
	checksum := 0

	for i := 0; i < 7; i++ {
		checksum += pos * int(s[i]-'0')
		pos--
	}

	if s[7] == 'X' {
		checksum += 10
	} else {
		checksum += int(s[7] - '0')
	}

	return checksum%11 == 0
}

// isEthereumAddress is the validation function for validating if the field's value is a valid Ethereum address.
func isEthereumAddress(fl FieldLevel) bool {
	address := fl.Field().String()

	return ethAddressRegex().MatchString(address)
}

// isEthereumAddressChecksum is the validation function for validating if the field's value is a valid checksummed Ethereum address.
func isEthereumAddressChecksum(fl FieldLevel) bool {
	address := fl.Field().String()

	if !ethAddressRegex().MatchString(address) {
		return false
	}
	// Checksum validation. Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md
	address = address[2:] // Skip "0x" prefix.
	h := sha3.NewLegacyKeccak256()
	// hash.Hash's io.Writer implementation says it never returns an error. https://golang.org/pkg/hash/#Hash
	_, _ = h.Write([]byte(strings.ToLower(address)))
	hash := hex.EncodeToString(h.Sum(nil))

	for i := 0; i < len(address); i++ {
		if address[i] <= '9' { // Skip 0-9 digits: they don't have upper/lower-case.
			continue
		}
		// EIP-55: hex letter must be uppercase iff the corresponding hash nibble is >= 8.
		if hash[i] > '7' && address[i] >= 'a' || hash[i] <= '7' && address[i] <= 'F' {
			return false
		}
	}

	return true
}

// isBitcoinAddress is the validation function for validating if the field's value is a valid btc address
func isBitcoinAddress(fl FieldLevel) bool {
	address := fl.Field().String()

	if !btcAddressRegex().MatchString(address) {
		return false
	}

	// Base58 alphabet (no 0, O, I, l).
	alphabet := []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz")

	// Base58-decode into a fixed 25-byte big-endian buffer:
	// 1 version byte + 20 payload bytes + 4 checksum bytes.
	decode := [25]byte{}

	for _, n := range []byte(address) {
		d := bytes.IndexByte(alphabet, n)

		for i := 24; i >= 0; i-- {
			d += 58 * int(decode[i])
			decode[i] = byte(d % 256)
			d /= 256
		}
	}

	// Checksum = first 4 bytes of SHA256(SHA256(version + payload)).
	h := sha256.New()
	_, _ = h.Write(decode[:21])
	d := h.Sum([]byte{})
	h = sha256.New()
	_, _ = h.Write(d)

	validchecksum := [4]byte{}
	computedchecksum := [4]byte{}

	copy(computedchecksum[:], h.Sum(d[:0]))
	copy(validchecksum[:], decode[21:])

	return validchecksum == computedchecksum
}

// isBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address
func isBitcoinBech32Address(fl FieldLevel) bool {
	address := fl.Field().String()

	// Bech32 forbids mixed case; accept all-lower or all-upper only.
	if !btcLowerAddressRegexBech32().MatchString(address) && !btcUpperAddressRegexBech32().MatchString(address) {
		return false
	}

	am := len(address) % 8

	if am == 0 || am == 3 || am == 5 {
		return false
	}

	address = strings.ToLower(address)

	// Bech32 data-character alphabet (BIP-173).
	alphabet := "qpzry9x8gf2tvdw0s3jn54khce6mua7l"

	hr := []int{3, 3, 0, 2, 3} // the human readable part will always be bc
	addr := address[3:]
	dp := make([]int, 0, len(addr))

	for _, c := range addr {
		dp = append(dp, strings.IndexRune(alphabet, c))
	}

	// First data value is the witness version (0..16).
	ver := dp[0]

	if ver < 0 || ver > 16 {
		return false
	}

	if ver == 0 {
		if len(address) != 42 && len(address) != 62 {
			return false
		}
	}

	values := append(hr, dp...)

	// BIP-173 polymod generator constants.
	GEN := []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}

	p := 1

	for _, v := range values {
		b := p >> 25
		p = (p&0x1ffffff)<<5 ^ v

		for i := 0; i < 5; i++ {
			if (b>>uint(i))&1 == 1 {
				p ^= GEN[i]
			}
		}
	}

	if p != 1 {
		return false
	}

	// Re-pack the 5-bit groups (excluding version and 6 checksum chars) into 8-bit bytes
	// and check the witness program length is within 2..40 bytes.
	b := uint(0)
	acc := 0
	mv := (1 << 5) - 1
	var sw []int

	for _, v := range dp[1 : len(dp)-6] {
		acc = (acc << 5) | v
		b += 5
		for b >= 8 {
			b -= 8
			sw = append(sw, (acc>>b)&mv)
		}
	}

	if len(sw) < 2 || len(sw) > 40 {
		return false
	}

	return true
}

// excludesRune is the validation function for validating that the field's value does not contain the rune specified within the param.
func excludesRune(fl FieldLevel) bool {
	return !containsRune(fl)
}

// excludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param.
func excludesAll(fl FieldLevel) bool {
	return !containsAny(fl)
}

// excludes is the validation function for validating that the field's value does not contain the text specified within the param.
func excludes(fl FieldLevel) bool {
	return !contains(fl)
}

// containsRune is the validation function for validating that the field's value contains the rune specified within the param.
func containsRune(fl FieldLevel) bool {
	// Only the first rune of the param is significant; decode error yields RuneError.
	r, _ := utf8.DecodeRuneInString(fl.Param())

	return strings.ContainsRune(fl.Field().String(), r)
}

// containsAny is the validation function for validating that the field's value contains any of the characters specified within the param.
func containsAny(fl FieldLevel) bool {
	return strings.ContainsAny(fl.Field().String(), fl.Param())
}

// contains is the validation function for validating that the field's value contains the text specified within the param.
+func contains(fl FieldLevel) bool { + return strings.Contains(fl.Field().String(), fl.Param()) +} + +// startsWith is the validation function for validating that the field's value starts with the text specified within the param. +func startsWith(fl FieldLevel) bool { + return strings.HasPrefix(fl.Field().String(), fl.Param()) +} + +// endsWith is the validation function for validating that the field's value ends with the text specified within the param. +func endsWith(fl FieldLevel) bool { + return strings.HasSuffix(fl.Field().String(), fl.Param()) +} + +// startsNotWith is the validation function for validating that the field's value does not start with the text specified within the param. +func startsNotWith(fl FieldLevel) bool { + return !startsWith(fl) +} + +// endsNotWith is the validation function for validating that the field's value does not end with the text specified within the param. +func endsNotWith(fl FieldLevel) bool { + return !endsWith(fl) +} + +// fieldContains is the validation function for validating if the current field's value contains the field specified by the param's value. +func fieldContains(fl FieldLevel) bool { + field := fl.Field() + + currentField, _, ok := fl.GetStructFieldOK() + + if !ok { + return false + } + + return strings.Contains(field.String(), currentField.String()) +} + +// fieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value. +func fieldExcludes(fl FieldLevel) bool { + field := fl.Field() + + currentField, _, ok := fl.GetStructFieldOK() + if !ok { + return true + } + + return !strings.Contains(field.String(), currentField.String()) +} + +// isNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value. 
+func isNeField(fl FieldLevel) bool { + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + + if !ok || currentKind != kind { + return true + } + + switch kind { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() != currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() != currentField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() != currentField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) != int64(currentField.Len()) + + case reflect.Bool: + return field.Bool() != currentField.Bool() + + case reflect.Struct: + + fieldType := field.Type() + + if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) { + t := getValue(currentField).(time.Time) + fieldTime := getValue(field).(time.Time) + + return !fieldTime.Equal(t) + } + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return true + } + } + + // default reflect.String: + return field.String() != currentField.String() +} + +// isNe is the validation function for validating that the field's value does not equal the provided param value. +func isNe(fl FieldLevel) bool { + return !isEq(fl) +} + +// isNeIgnoreCase is the validation function for validating that the field's string value does not equal the +// provided param value. The comparison is case-insensitive +func isNeIgnoreCase(fl FieldLevel) bool { + return !isEqIgnoreCase(fl) +} + +// isLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value. 
func isLteCrossStructField(fl FieldLevel) bool {
	field := fl.Field()
	kind := field.Kind()

	// Unresolvable reference or mismatched kinds can never satisfy <=.
	topField, topKind, ok := fl.GetStructFieldOK()
	if !ok || topKind != kind {
		return false
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return field.Int() <= topField.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return field.Uint() <= topField.Uint()

	case reflect.Float32, reflect.Float64:
		return field.Float() <= topField.Float()

	case reflect.Slice, reflect.Map, reflect.Array:
		// Collections are ordered by length.
		return int64(field.Len()) <= int64(topField.Len())

	case reflect.Struct:

		fieldType := field.Type()

		// Structs convertible to time.Time compare chronologically; <= means Before-or-Equal.
		if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
			fieldTime := getValue(field.Convert(timeType)).(time.Time)
			topTime := getValue(topField.Convert(timeType)).(time.Time)

			return fieldTime.Before(topTime) || fieldTime.Equal(topTime)
		}

		// Not Same underlying type i.e. struct and time
		if fieldType != topField.Type() {
			return false
		}
	}

	// default reflect.String:
	return field.String() <= topField.String()
}

// isLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value.
// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
func isLtCrossStructField(fl FieldLevel) bool {
	field := fl.Field()
	kind := field.Kind()

	// Unresolvable reference or mismatched kinds can never satisfy <.
	topField, topKind, ok := fl.GetStructFieldOK()
	if !ok || topKind != kind {
		return false
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return field.Int() < topField.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return field.Uint() < topField.Uint()

	case reflect.Float32, reflect.Float64:
		return field.Float() < topField.Float()

	case reflect.Slice, reflect.Map, reflect.Array:
		// Collections are ordered by length.
		return int64(field.Len()) < int64(topField.Len())

	case reflect.Struct:

		fieldType := field.Type()

		// Structs convertible to time.Time compare chronologically; < means strictly Before.
		if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
			fieldTime := getValue(field.Convert(timeType)).(time.Time)
			topTime := getValue(topField.Convert(timeType)).(time.Time)

			return fieldTime.Before(topTime)
		}

		// Not Same underlying type i.e. struct and time
		if fieldType != topField.Type() {
			return false
		}
	}

	// default reflect.String:
	return field.String() < topField.String()
}

// isGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value.
func isGteCrossStructField(fl FieldLevel) bool {
	field := fl.Field()
	kind := field.Kind()

	// Unresolvable reference or mismatched kinds can never satisfy >=.
	topField, topKind, ok := fl.GetStructFieldOK()
	if !ok || topKind != kind {
		return false
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return field.Int() >= topField.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return field.Uint() >= topField.Uint()

	case reflect.Float32, reflect.Float64:
		return field.Float() >= topField.Float()

	case reflect.Slice, reflect.Map, reflect.Array:
		// Collections are ordered by length.
		return int64(field.Len()) >= int64(topField.Len())

	case reflect.Struct:

		fieldType := field.Type()

		// Structs convertible to time.Time compare chronologically; >= means After-or-Equal.
		if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
			fieldTime := getValue(field.Convert(timeType)).(time.Time)
			topTime := getValue(topField.Convert(timeType)).(time.Time)

			return fieldTime.After(topTime) || fieldTime.Equal(topTime)
		}

		// Not Same underlying type i.e. struct and time
		if fieldType != topField.Type() {
			return false
		}
	}

	// default reflect.String:
	return field.String() >= topField.String()
}

// isGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value.
func isGtCrossStructField(fl FieldLevel) bool {
	field := fl.Field()
	kind := field.Kind()

	// Unresolvable reference or mismatched kinds can never satisfy >.
	topField, topKind, ok := fl.GetStructFieldOK()
	if !ok || topKind != kind {
		return false
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return field.Int() > topField.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return field.Uint() > topField.Uint()

	case reflect.Float32, reflect.Float64:
		return field.Float() > topField.Float()

	case reflect.Slice, reflect.Map, reflect.Array:
		// Collections are ordered by length.
		return int64(field.Len()) > int64(topField.Len())

	case reflect.Struct:

		fieldType := field.Type()

		// Structs convertible to time.Time compare chronologically; > means strictly After.
		if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
			fieldTime := getValue(field.Convert(timeType)).(time.Time)
			topTime := getValue(topField.Convert(timeType)).(time.Time)

			return fieldTime.After(topTime)
		}

		// Not Same underlying type i.e. struct and time
		if fieldType != topField.Type() {
			return false
		}
	}

	// default reflect.String:
	return field.String() > topField.String()
}

// isNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value.
func isNeCrossStructField(fl FieldLevel) bool {
	field := fl.Field()
	kind := field.Kind()

	// Unresolvable reference or mismatched kinds are trivially "not equal".
	topField, currentKind, ok := fl.GetStructFieldOK()
	if !ok || currentKind != kind {
		return true
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return topField.Int() != field.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return topField.Uint() != field.Uint()

	case reflect.Float32, reflect.Float64:
		return topField.Float() != field.Float()

	case reflect.Slice, reflect.Map, reflect.Array:
		// Collections compare by length only.
		return int64(topField.Len()) != int64(field.Len())

	case reflect.Bool:
		return topField.Bool() != field.Bool()

	case reflect.Struct:

		fieldType := field.Type()

		// NOTE: the names here are swapped relative to their meaning:
		// `t` holds THIS field's time and `fieldTime` holds the cross-struct
		// field's time. Equality is symmetric, so the result is unaffected.
		if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
			t := getValue(field.Convert(timeType)).(time.Time)
			fieldTime := getValue(topField.Convert(timeType)).(time.Time)

			return !fieldTime.Equal(t)
		}

		// Not Same underlying type i.e. struct and time
		if fieldType != topField.Type() {
			return true
		}
	}

	// default reflect.String:
	return topField.String() != field.String()
}

// isEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value.
func isEqCrossStructField(fl FieldLevel) bool {
	field := fl.Field()
	kind := field.Kind()

	// Unresolvable reference or mismatched kinds can never be equal.
	topField, topKind, ok := fl.GetStructFieldOK()
	if !ok || topKind != kind {
		return false
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return topField.Int() == field.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return topField.Uint() == field.Uint()

	case reflect.Float32, reflect.Float64:
		return topField.Float() == field.Float()

	case reflect.Slice, reflect.Map, reflect.Array:
		// Collections compare by length only.
		return int64(topField.Len()) == int64(field.Len())

	case reflect.Bool:
		return topField.Bool() == field.Bool()

	case reflect.Struct:

		fieldType := field.Type()

		// Structs convertible to time.Time compare with time.Time.Equal
		// (instant equality, not struct equality).
		if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
			t := getValue(field.Convert(timeType)).(time.Time)
			fieldTime := getValue(topField.Convert(timeType)).(time.Time)

			return fieldTime.Equal(t)
		}

		// Not Same underlying type i.e. struct and time
		if fieldType != topField.Type() {
			return false
		}
	}

	// default reflect.String:
	return topField.String() == field.String()
}

// isEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value.
func isEqField(fl FieldLevel) bool {
	field := fl.Field()
	kind := field.Kind()

	// Unresolvable reference or mismatched kinds can never be equal.
	currentField, currentKind, ok := fl.GetStructFieldOK()
	if !ok || currentKind != kind {
		return false
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return field.Int() == currentField.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return field.Uint() == currentField.Uint()

	case reflect.Float32, reflect.Float64:
		return field.Float() == currentField.Float()

	case reflect.Slice, reflect.Map, reflect.Array:
		// Collections compare by length only.
		return int64(field.Len()) == int64(currentField.Len())

	case reflect.Bool:
		return field.Bool() == currentField.Bool()

	case reflect.Struct:

		fieldType := field.Type()

		// Structs convertible to time.Time compare with time.Time.Equal
		// (instant equality, not struct equality).
		if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
			t := getValue(currentField.Convert(timeType)).(time.Time)
			fieldTime := getValue(field.Convert(timeType)).(time.Time)

			return fieldTime.Equal(t)
		}

		// Not Same underlying type i.e. struct and time
		if fieldType != currentField.Type() {
			return false
		}
	}

	// default reflect.String:
	return field.String() == currentField.String()
}

// isEq is the validation function for validating if the current field's value is equal to the param's value.
func isEq(fl FieldLevel) bool {
	field := fl.Field()
	param := fl.Param()

	switch field.Kind() {
	case reflect.String:
		return field.String() == param

	case reflect.Slice, reflect.Map, reflect.Array:
		// For collections, "eq" compares length against the param.
		p := asInt(param)

		return int64(field.Len()) == p

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		// asIntFromType handles time.Duration-style params as well as plain ints.
		p := asIntFromType(field.Type(), param)

		return field.Int() == p

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)

		return field.Uint() == p

	case reflect.Float32:
		// Parse at float32 precision so the comparison is not skewed by widening.
		p := asFloat32(param)

		return field.Float() == p

	case reflect.Float64:
		p := asFloat64(param)

		return field.Float() == p

	case reflect.Bool:
		p := asBool(param)

		return field.Bool() == p
	}

	// Unsupported kinds are a programmer error in the tag, hence panic.
	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isEqIgnoreCase is the validation function for validating if the current field's string value is
// equal to the param's value.
// The comparison is case-insensitive.
func isEqIgnoreCase(fl FieldLevel) bool {
	field := fl.Field()
	param := fl.Param()

	switch field.Kind() {
	case reflect.String:
		return strings.EqualFold(field.String(), param)
	}

	// Only strings support case-insensitive equality; anything else is a tag misuse.
	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isPostcodeByIso3166Alpha2 validates by value which is country code in iso 3166 alpha 2
// example: `postcode_iso3166_alpha2=US`
func isPostcodeByIso3166Alpha2(fl FieldLevel) bool {
	field := fl.Field()
	param := fl.Param()

	// Postcode regexes are built lazily, exactly once.
	postcodeRegexInit.Do(initPostcodes)
	reg, found := postCodeRegexDict[param]
	if !found {
		// Unknown country code: fail rather than panic.
		return false
	}

	return reg.MatchString(field.String())
}

// isPostcodeByIso3166Alpha2Field validates by field which represents for a value of country code in iso 3166 alpha 2
// example: `postcode_iso3166_alpha2_field=CountryCode`
func isPostcodeByIso3166Alpha2Field(fl FieldLevel) bool {
	field := fl.Field()
	params := parseOneOfParam2(fl.Param())

	// Exactly one referenced field name is expected.
	if len(params) != 1 {
		return false
	}

	currentField, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), params[0])
	if !found {
		return false
	}

	// The referenced country-code field must itself be a string.
	if kind != reflect.String {
		panic(fmt.Sprintf("Bad field type %s", currentField.Type()))
	}

	postcodeRegexInit.Do(initPostcodes)
	reg, found := postCodeRegexDict[currentField.String()]
	if !found {
		return false
	}

	return reg.MatchString(field.String())
}

// isBase32 is the validation function for validating if the current field's value is a valid base 32.
func isBase32(fl FieldLevel) bool {
	return base32Regex().MatchString(fl.Field().String())
}

// isBase64 is the validation function for validating if the current field's value is a valid base 64.
func isBase64(fl FieldLevel) bool {
	return base64Regex().MatchString(fl.Field().String())
}

// isBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string.
func isBase64URL(fl FieldLevel) bool {
	return base64URLRegex().MatchString(fl.Field().String())
}

// isBase64RawURL is the validation function for validating if the current field's value is a valid base64 URL safe string without '=' padding.
func isBase64RawURL(fl FieldLevel) bool {
	return base64RawURLRegex().MatchString(fl.Field().String())
}

// isURI is the validation function for validating if the current field's value is a valid URI.
func isURI(fl FieldLevel) bool {
	field := fl.Field()

	switch field.Kind() {
	case reflect.String:

		s := field.String()

		// checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
		// emulate browser and strip the '#' suffix prior to validation. see issue-#237
		if i := strings.Index(s, "#"); i > -1 {
			s = s[:i]
		}

		// An empty string (or bare fragment) is not a URI.
		if len(s) == 0 {
			return false
		}

		_, err := url.ParseRequestURI(s)

		return err == nil
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isURL is the validation function for validating if the current field's value is a valid URL.
func isURL(fl FieldLevel) bool {
	field := fl.Field()

	switch field.Kind() {
	case reflect.String:

		s := strings.ToLower(field.String())

		if len(s) == 0 {
			return false
		}

		// A URL must parse and carry a scheme.
		url, err := url.Parse(s)
		if err != nil || url.Scheme == "" {
			return false
		}
		isFileScheme := url.Scheme == "file"

		// file:// URLs need a non-trivial path; other schemes need a host,
		// fragment, or opaque part to be considered a real URL.
		if (isFileScheme && (len(url.Path) == 0 || url.Path == "/")) || (!isFileScheme && len(url.Host) == 0 && len(url.Fragment) == 0 && len(url.Opaque) == 0) {
			return false
		}

		return true
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isHttpURL is the validation function for validating if the current field's value is a valid HTTP(s) URL.
func isHttpURL(fl FieldLevel) bool {
	// Must first satisfy the generic URL checks.
	if !isURL(fl) {
		return false
	}

	field := fl.Field()
	switch field.Kind() {
	case reflect.String:

		s := strings.ToLower(field.String())

		// Re-parse to additionally require a host and an http/https scheme.
		url, err := url.Parse(s)
		if err != nil || url.Host == "" {
			return false
		}

		return url.Scheme == "http" || url.Scheme == "https"
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isHttpsURL is the validation function for validating if the current field's value is a valid HTTPS-only URL.
func isHttpsURL(fl FieldLevel) bool {
	// Must first satisfy the generic URL checks.
	if !isURL(fl) {
		return false
	}

	field := fl.Field()
	switch field.Kind() {
	case reflect.String:

		s := strings.ToLower(field.String())

		// Re-parse to additionally require a host and the https scheme.
		url, err := url.Parse(s)
		if err != nil || url.Host == "" {
			return false
		}

		return url.Scheme == "https"
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isUrnRFC2141 is the validation function for validating if the current field's value is a valid URN as per RFC 2141.
func isUrnRFC2141(fl FieldLevel) bool {
	field := fl.Field()

	switch field.Kind() {
	case reflect.String:

		str := field.String()

		// urn.Parse returns (parsed, ok); only the ok flag matters here.
		_, match := urn.Parse([]byte(str))

		return match
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isFile is the validation function for validating if the current field's value is a valid existing file path.
+func isFile(fl FieldLevel) bool { + field := fl.Field() + + switch field.Kind() { + case reflect.String: + fileInfo, err := os.Stat(field.String()) + if err != nil { + return false + } + + return !fileInfo.IsDir() + } + + panic(fmt.Sprintf("Bad field type %s", field.Type())) +} + +// isImage is the validation function for validating if the current field's value contains the path to a valid image file +func isImage(fl FieldLevel) bool { + mimetypes := map[string]bool{ + "image/bmp": true, + "image/cis-cod": true, + "image/gif": true, + "image/ief": true, + "image/jpeg": true, + "image/jp2": true, + "image/jpx": true, + "image/jpm": true, + "image/pipeg": true, + "image/png": true, + "image/svg+xml": true, + "image/tiff": true, + "image/webp": true, + "image/x-cmu-raster": true, + "image/x-cmx": true, + "image/x-icon": true, + "image/x-portable-anymap": true, + "image/x-portable-bitmap": true, + "image/x-portable-graymap": true, + "image/x-portable-pixmap": true, + "image/x-rgb": true, + "image/x-xbitmap": true, + "image/x-xpixmap": true, + "image/x-xwindowdump": true, + } + field := fl.Field() + + switch field.Kind() { + case reflect.String: + filePath := field.String() + fileInfo, err := os.Stat(filePath) + if err != nil { + return false + } + + if fileInfo.IsDir() { + return false + } + + file, err := os.Open(filePath) + if err != nil { + return false + } + defer func() { + _ = file.Close() + }() + + mime, err := mimetype.DetectReader(file) + if err != nil { + return false + } + + if _, ok := mimetypes[mime.String()]; ok { + return true + } + } + + panic(fmt.Sprintf("Bad field type %s", field.Type())) +} + +// isFilePath is the validation function for validating if the current field's value is a valid file path. +func isFilePath(fl FieldLevel) bool { + var exists bool + var err error + + field := fl.Field() + + // Not valid if it is a directory. + if isDir(fl) { + return false + } + // If it exists, it obviously is valid. 
+ // This is done first to avoid code duplication and unnecessary additional logic. + if exists = isFile(fl); exists { + return true + } + + // It does not exist but may still be a valid filepath. + switch field.Kind() { + case reflect.String: + // Every OS allows for whitespace, but none + // let you use a file with no filename (to my knowledge). + // Unless you're dealing with raw inodes, but I digress. + if strings.TrimSpace(field.String()) == "" { + return false + } + // We make sure it isn't a directory. + if strings.HasSuffix(field.String(), string(os.PathSeparator)) { + return false + } + if _, err = os.Stat(field.String()); err != nil { + switch t := err.(type) { + case *fs.PathError: + if t.Err == syscall.EINVAL { + // It's definitely an invalid character in the filepath. + return false + } + // It could be a permission error, a does-not-exist error, etc. + // Out-of-scope for this validation, though. + return true + default: + // Something went *seriously* wrong. + /* + Per https://pkg.go.dev/os#Stat: + "If there is an error, it will be of type *PathError." + */ + panic(err) + } + } + } + + panic(fmt.Sprintf("Bad field type %s", field.Type())) +} + +// isE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number. +func isE164(fl FieldLevel) bool { + return e164Regex().MatchString(fl.Field().String()) +} + +// isEmail is the validation function for validating if the current field's value is a valid email address. +func isEmail(fl FieldLevel) bool { + _, err := mail.ParseAddress(fl.Field().String()) + if err != nil { + return false + } + return emailRegex().MatchString(fl.Field().String()) +} + +// isHSLA is the validation function for validating if the current field's value is a valid HSLA color. +func isHSLA(fl FieldLevel) bool { + return hslaRegex().MatchString(fl.Field().String()) +} + +// isHSL is the validation function for validating if the current field's value is a valid HSL color. 
+func isHSL(fl FieldLevel) bool { + return hslRegex().MatchString(fl.Field().String()) +} + +// isRGBA is the validation function for validating if the current field's value is a valid RGBA color. +func isRGBA(fl FieldLevel) bool { + return rgbaRegex().MatchString(fl.Field().String()) +} + +// isRGB is the validation function for validating if the current field's value is a valid RGB color. +func isRGB(fl FieldLevel) bool { + return rgbRegex().MatchString(fl.Field().String()) +} + +// isHEXColor is the validation function for validating if the current field's value is a valid HEX color. +func isHEXColor(fl FieldLevel) bool { + return hexColorRegex().MatchString(fl.Field().String()) +} + +// isHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal. +func isHexadecimal(fl FieldLevel) bool { + return hexadecimalRegex().MatchString(fl.Field().String()) +} + +// isNumber is the validation function for validating if the current field's value is a valid number. +func isNumber(fl FieldLevel) bool { + switch fl.Field().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return true + default: + return numberRegex().MatchString(fl.Field().String()) + } +} + +// isNumeric is the validation function for validating if the current field's value is a valid numeric value. +func isNumeric(fl FieldLevel) bool { + switch fl.Field().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return true + default: + return numericRegex().MatchString(fl.Field().String()) + } +} + +// isAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value. 
+func isAlphanum(fl FieldLevel) bool { + return alphaNumericRegex().MatchString(fl.Field().String()) +} + +// isAlpha is the validation function for validating if the current field's value is a valid alpha value. +func isAlpha(fl FieldLevel) bool { + return alphaRegex().MatchString(fl.Field().String()) +} + +// isAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value. +func isAlphanumUnicode(fl FieldLevel) bool { + return alphaUnicodeNumericRegex().MatchString(fl.Field().String()) +} + +// isAlphaSpace is the validation function for validating if the current field's value is a valid alpha value with spaces. +func isAlphaSpace(fl FieldLevel) bool { + return alphaSpaceRegex().MatchString(fl.Field().String()) +} + +// isAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value. +func isAlphaUnicode(fl FieldLevel) bool { + return alphaUnicodeRegex().MatchString(fl.Field().String()) +} + +// isBoolean is the validation function for validating if the current field's value is a valid boolean value or can be safely converted to a boolean value. +func isBoolean(fl FieldLevel) bool { + switch fl.Field().Kind() { + case reflect.Bool: + return true + default: + _, err := strconv.ParseBool(fl.Field().String()) + return err == nil + } +} + +// isDefault is the opposite of required aka hasValue +func isDefault(fl FieldLevel) bool { + return !hasValue(fl) +} + +// hasValue is the validation function for validating if the current field's value is not the default static value. 
func hasValue(fl FieldLevel) bool {
	field := fl.Field()
	switch field.Kind() {
	case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
		// Nilable kinds: any non-nil value counts, even an empty slice/map.
		return !field.IsNil()
	default:
		// A non-nil pointer to a zero value still counts as "has value".
		if fl.(*validate).fldIsPointer && getValue(field) != nil {
			return true
		}
		return field.IsValid() && !field.IsZero()
	}
}

// hasNotZeroValue is the validation function for validating if the current field's value is not the zero value for its type.
func hasNotZeroValue(fl FieldLevel) bool {
	field := fl.Field()
	switch field.Kind() {
	case reflect.Slice, reflect.Map:
		// For slices and maps, consider them "not zero" only if they're both non-nil AND have elements
		return !field.IsNil() && field.Len() > 0
	case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
		return !field.IsNil()
	default:
		// Unlike hasValue, a non-nil pointer to a zero value is still "zero" here.
		if fl.(*validate).fldIsPointer && getValue(field) != nil {
			return !field.IsZero()
		}
		return field.IsValid() && !field.IsZero()
	}
}

// requireCheckFieldKind is a func for check field kind
// Reports whether the (optionally param-referenced) field is nil/zero;
// defaultNotFoundValue is returned when the referenced field cannot be resolved.
func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue bool) bool {
	field := fl.Field()
	kind := field.Kind()
	var nullable, found bool
	if len(param) > 0 {
		field, kind, nullable, found = fl.GetStructFieldOKAdvanced2(fl.Parent(), param)
		if !found {
			return defaultNotFoundValue
		}
	}
	switch kind {
	case reflect.Invalid:
		return defaultNotFoundValue
	case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
		return field.IsNil()
	default:
		// A nullable field holding a non-nil (possibly zero) value is not "empty".
		if nullable && getValue(field) != nil {
			return false
		}
		return field.IsValid() && field.IsZero()
	}
}

// requireCheckFieldValue is a func for check field value
// Reports whether the field named by param equals the tag-supplied value;
// the literal "nil" matches nil slices/maps/pointers.
func requireCheckFieldValue(
	fl FieldLevel, param string, value string, defaultNotFoundValue bool,
) bool {
	field, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), param)
	if !found {
		return defaultNotFoundValue
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return field.Int() == asInt(value)

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return field.Uint() == asUint(value)

	case reflect.Float32:
		return field.Float() == asFloat32(value)

	case reflect.Float64:
		return field.Float() == asFloat64(value)

	case reflect.Slice, reflect.Map:
		if value == "nil" {
			return field.IsNil()
		}
		return int64(field.Len()) == asInt(value)
	case reflect.Array:
		// Arrays can't be nil, so only compare lengths
		return int64(field.Len()) == asInt(value)

	case reflect.Bool:
		return field.Bool() == (value == "true")

	case reflect.Ptr:
		if field.IsNil() {
			return value == "nil"
		}
		// Handle non-nil pointers
		// NOTE(review): this recursive call passes identical arguments; unless
		// GetStructFieldOKAdvanced2 dereferences the pointer on re-resolution,
		// this looks like unbounded recursion for a non-nil pointer field —
		// verify against the resolver's behavior.
		return requireCheckFieldValue(fl, param, value, defaultNotFoundValue)
	}

	// default reflect.String:
	return field.String() == value
}

// requiredIf is the validation function
// The field under validation must be present and not empty only if all the other specified fields are equal to the value following with the specified field.
func requiredIf(fl FieldLevel) bool {
	// Params come in (field, value) pairs.
	params := parseOneOfParam2(fl.Param())
	if len(params)%2 != 0 {
		panic(fmt.Sprintf("Bad param number for required_if %s", fl.FieldName()))
	}

	// Reject the same field being listed twice — that tag is ambiguous.
	seen := make(map[string]struct{})
	for i := 0; i < len(params); i += 2 {
		if _, ok := seen[params[i]]; ok {
			panic(fmt.Sprintf("Duplicate param %s for required_if %s", params[i], fl.FieldName()))
		}
		seen[params[i]] = struct{}{}
	}

	// If any condition does NOT hold, the field is not required — pass.
	for i := 0; i < len(params); i += 2 {
		if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
			return true
		}
	}
	return hasValue(fl)
}

// excludedIf is the validation function
// The field under validation must not be present or is empty only if all the other specified fields are equal to the value following with the specified field.
func excludedIf(fl FieldLevel) bool {
	params := parseOneOfParam2(fl.Param())
	if len(params)%2 != 0 {
		panic(fmt.Sprintf("Bad param number for excluded_if %s", fl.FieldName()))
	}

	for i := 0; i < len(params); i += 2 {
		// Any non-matching pair means the exclusion condition is not met.
		if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
			return true
		}
	}
	return !hasValue(fl)
}

// requiredUnless is the validation function
// The field under validation must be present and not empty only unless all the other specified fields are equal to the value following with the specified field.
func requiredUnless(fl FieldLevel) bool {
	params := parseOneOfParam2(fl.Param())
	if len(params)%2 != 0 {
		panic(fmt.Sprintf("Bad param number for required_unless %s", fl.FieldName()))
	}

	for i := 0; i < len(params); i += 2 {
		// Any matching pair lifts the requirement: field passes.
		if requireCheckFieldValue(fl, params[i], params[i+1], false) {
			return true
		}
	}
	return hasValue(fl)
}

// skipUnless is the validation function
// The field under validation must be present and not empty only unless all the other specified fields are equal to the value following with the specified field.
// NOTE(review): this doc comment appears copy-pasted from requiredUnless; per
// the code, the field passes outright unless ALL named fields match their
// values, in which case it must have a value — confirm against the tag docs.
func skipUnless(fl FieldLevel) bool {
	params := parseOneOfParam2(fl.Param())
	if len(params)%2 != 0 {
		panic(fmt.Sprintf("Bad param number for skip_unless %s", fl.FieldName()))
	}
	for i := 0; i < len(params); i += 2 {
		if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
			return true
		}
	}
	return hasValue(fl)
}

// excludedUnless is the validation function
// The field under validation must not be present or is empty unless all the other specified fields are equal to the value following with the specified field.
func excludedUnless(fl FieldLevel) bool {
	params := parseOneOfParam2(fl.Param())
	if len(params)%2 != 0 {
		panic(fmt.Sprintf("Bad param number for excluded_unless %s", fl.FieldName()))
	}
	for i := 0; i < len(params); i += 2 {
		// First non-matching pair triggers the exclusion requirement.
		if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
			return !hasValue(fl)
		}
	}
	return true
}

// excludedWith is the validation function
// The field under validation must not be present or is empty if any of the other specified fields are present.
func excludedWith(fl FieldLevel) bool {
	params := parseOneOfParam2(fl.Param())
	for _, param := range params {
		// requireCheckFieldKind == false means the named field IS present.
		if !requireCheckFieldKind(fl, param, true) {
			return !hasValue(fl)
		}
	}
	return true
}

// requiredWith is the validation function
// The field under validation must be present and not empty only if any of the other specified fields are present.
func requiredWith(fl FieldLevel) bool {
	params := parseOneOfParam2(fl.Param())
	for _, param := range params {
		if !requireCheckFieldKind(fl, param, true) {
			return hasValue(fl)
		}
	}
	return true
}

// excludedWithAll is the validation function
// The field under validation must not be present or is empty if all of the other specified fields are present.
func excludedWithAll(fl FieldLevel) bool {
	params := parseOneOfParam2(fl.Param())
	for _, param := range params {
		// Any absent field breaks the "all present" condition: field passes.
		if requireCheckFieldKind(fl, param, true) {
			return true
		}
	}
	return !hasValue(fl)
}

// requiredWithAll is the validation function
// The field under validation must be present and not empty only if all of the other specified fields are present.
func requiredWithAll(fl FieldLevel) bool {
	params := parseOneOfParam2(fl.Param())
	for _, param := range params {
		// Any absent field breaks the "all present" condition: field passes.
		if requireCheckFieldKind(fl, param, true) {
			return true
		}
	}
	return hasValue(fl)
}

// excludedWithout is the validation function
// The field under validation must not be present or is empty when any of the other specified fields are not present.
// NOTE(review): unlike its siblings this takes a single (trimmed) param
// rather than parsing a list via parseOneOfParam2 — matches upstream, but
// worth confirming the tag only ever carries one field name here.
func excludedWithout(fl FieldLevel) bool {
	if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) {
		return !hasValue(fl)
	}
	return true
}

// requiredWithout is the validation function
// The field under validation must be present and not empty only when any of the other specified fields are not present.
func requiredWithout(fl FieldLevel) bool {
	params := parseOneOfParam2(fl.Param())
	for _, param := range params {
		// requireCheckFieldKind == true means the named field is absent/zero.
		if requireCheckFieldKind(fl, param, true) {
			return hasValue(fl)
		}
	}
	return true
}

// excludedWithoutAll is the validation function
// The field under validation must not be present or is empty when all of the other specified fields are not present.
func excludedWithoutAll(fl FieldLevel) bool {
	params := parseOneOfParam2(fl.Param())
	for _, param := range params {
		// Any present field breaks the "all absent" condition: field passes.
		if !requireCheckFieldKind(fl, param, true) {
			return true
		}
	}
	return !hasValue(fl)
}

// requiredWithoutAll is the validation function
// The field under validation must be present and not empty only when all of the other specified fields are not present.
func requiredWithoutAll(fl FieldLevel) bool {
	params := parseOneOfParam2(fl.Param())
	for _, param := range params {
		if !requireCheckFieldKind(fl, param, true) {
			return true
		}
	}
	return hasValue(fl)
}

// isGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value.
func isGteField(fl FieldLevel) bool {
	field := fl.Field()
	kind := field.Kind()

	// The two fields must resolve and share the same kind to be comparable.
	currentField, currentKind, ok := fl.GetStructFieldOK()
	if !ok || currentKind != kind {
		return false
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:

		return field.Int() >= currentField.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:

		return field.Uint() >= currentField.Uint()

	case reflect.Float32, reflect.Float64:

		return field.Float() >= currentField.Float()

	case reflect.Struct:

		fieldType := field.Type()

		// time.Time (and convertible types): >= means After or Equal.
		if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
			t := getValue(currentField.Convert(timeType)).(time.Time)
			fieldTime := getValue(field.Convert(timeType)).(time.Time)

			return fieldTime.After(t) || fieldTime.Equal(t)
		}

		// Not Same underlying type i.e. struct and time
		if fieldType != currentField.Type() {
			return false
		}
	}

	// default reflect.String: compares byte lengths, not lexicographic order.
	return len(field.String()) >= len(currentField.String())
}

// isGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value.
func isGtField(fl FieldLevel) bool {
	field := fl.Field()
	kind := field.Kind()

	// The two fields must resolve and share the same kind to be comparable.
	currentField, currentKind, ok := fl.GetStructFieldOK()
	if !ok || currentKind != kind {
		return false
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:

		return field.Int() > currentField.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:

		return field.Uint() > currentField.Uint()

	case reflect.Float32, reflect.Float64:

		return field.Float() > currentField.Float()

	case reflect.Struct:

		fieldType := field.Type()

		// time.Time (and convertible types): strictly After.
		if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
			t := getValue(currentField.Convert(timeType)).(time.Time)
			fieldTime := getValue(field.Convert(timeType)).(time.Time)

			return fieldTime.After(t)
		}

		// Not Same underlying type i.e. struct and time
		if fieldType != currentField.Type() {
			return false
		}
	}

	// default reflect.String: compares byte lengths, not lexicographic order.
	return len(field.String()) > len(currentField.String())
}

// isGte is the validation function for validating if the current field's value is greater than or equal to the param's value.
func isGte(fl FieldLevel) bool {
	field := fl.Field()
	param := fl.Param()

	switch field.Kind() {
	case reflect.String:
		// Strings compare rune count (not byte length) against the param.
		p := asInt(param)

		return int64(utf8.RuneCountInString(field.String())) >= p

	case reflect.Slice, reflect.Map, reflect.Array:
		p := asInt(param)

		return int64(field.Len()) >= p

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asIntFromType(field.Type(), param)

		return field.Int() >= p

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)

		return field.Uint() >= p

	case reflect.Float32:
		p := asFloat32(param)

		return field.Float() >= p

	case reflect.Float64:
		p := asFloat64(param)

		return field.Float() >= p

	case reflect.Struct:

		// time.Time (and convertible types): must be now or later (UTC).
		if field.Type().ConvertibleTo(timeType) {
			now := time.Now().UTC()
			t := getValue(field.Convert(timeType)).(time.Time)

			return t.After(now) || t.Equal(now)
		}
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isGt is the validation function for validating if the current field's value is greater than the param's value.
func isGt(fl FieldLevel) bool {
	field := fl.Field()
	param := fl.Param()

	switch field.Kind() {
	case reflect.String:
		// Strings compare rune count (not byte length) against the param.
		p := asInt(param)

		return int64(utf8.RuneCountInString(field.String())) > p

	case reflect.Slice, reflect.Map, reflect.Array:
		p := asInt(param)

		return int64(field.Len()) > p

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asIntFromType(field.Type(), param)

		return field.Int() > p

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)

		return field.Uint() > p

	case reflect.Float32:
		p := asFloat32(param)

		return field.Float() > p

	case reflect.Float64:
		p := asFloat64(param)

		return field.Float() > p

	case reflect.Struct:

		// time.Time (and convertible types): must be strictly in the future (UTC).
		if field.Type().ConvertibleTo(timeType) {
			return getValue(field.Convert(timeType)).(time.Time).After(time.Now().UTC())
		}
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// hasLengthOf is the validation function for validating if the current field's value is equal to the param's value.
func hasLengthOf(fl FieldLevel) bool {
	field := fl.Field()
	param := fl.Param()

	switch field.Kind() {
	case reflect.String:
		// Strings compare rune count (not byte length) against the param.
		p := asInt(param)

		return int64(utf8.RuneCountInString(field.String())) == p

	case reflect.Slice, reflect.Map, reflect.Array:
		p := asInt(param)

		return int64(field.Len()) == p

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asIntFromType(field.Type(), param)

		return field.Int() == p

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)

		return field.Uint() == p

	case reflect.Float32:
		p := asFloat32(param)

		return field.Float() == p

	case reflect.Float64:
		p := asFloat64(param)

		return field.Float() == p
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// hasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value.
func hasMinOf(fl FieldLevel) bool {
	return isGte(fl)
}

// isLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value.
func isLteField(fl FieldLevel) bool {
	field := fl.Field()
	kind := field.Kind()

	// The two fields must resolve and share the same kind to be comparable.
	currentField, currentKind, ok := fl.GetStructFieldOK()
	if !ok || currentKind != kind {
		return false
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:

		return field.Int() <= currentField.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:

		return field.Uint() <= currentField.Uint()

	case reflect.Float32, reflect.Float64:

		return field.Float() <= currentField.Float()

	case reflect.Struct:

		fieldType := field.Type()

		// time.Time (and convertible types): <= means Before or Equal.
		if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
			t := getValue(currentField.Convert(timeType)).(time.Time)
			fieldTime := getValue(field.Convert(timeType)).(time.Time)

			return fieldTime.Before(t) || fieldTime.Equal(t)
		}

		// Not Same underlying type i.e. struct and time
		if fieldType != currentField.Type() {
			return false
		}
	}

	// default reflect.String: compares byte lengths, not lexicographic order.
	return len(field.String()) <= len(currentField.String())
}

// isLtField is the validation function for validating if the current field's value is less than the field specified by the param's value.
func isLtField(fl FieldLevel) bool {
	field := fl.Field()
	kind := field.Kind()

	// The two fields must resolve and share the same kind to be comparable.
	currentField, currentKind, ok := fl.GetStructFieldOK()
	if !ok || currentKind != kind {
		return false
	}

	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:

		return field.Int() < currentField.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:

		return field.Uint() < currentField.Uint()

	case reflect.Float32, reflect.Float64:

		return field.Float() < currentField.Float()

	case reflect.Struct:

		fieldType := field.Type()

		// time.Time (and convertible types): strictly Before.
		if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
			t := getValue(currentField.Convert(timeType)).(time.Time)
			fieldTime := getValue(field.Convert(timeType)).(time.Time)

			return fieldTime.Before(t)
		}

		// Not Same underlying type i.e. struct and time
		if fieldType != currentField.Type() {
			return false
		}
	}

	// default reflect.String: compares byte lengths, not lexicographic order.
	return len(field.String()) < len(currentField.String())
}

// isLte is the validation function for validating if the current field's value is less than or equal to the param's value.
func isLte(fl FieldLevel) bool {
	field := fl.Field()
	param := fl.Param()

	switch field.Kind() {
	case reflect.String:
		// Strings compare rune count (not byte length) against the param.
		p := asInt(param)

		return int64(utf8.RuneCountInString(field.String())) <= p

	case reflect.Slice, reflect.Map, reflect.Array:
		p := asInt(param)

		return int64(field.Len()) <= p

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asIntFromType(field.Type(), param)

		return field.Int() <= p

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)

		return field.Uint() <= p

	case reflect.Float32:
		p := asFloat32(param)

		return field.Float() <= p

	case reflect.Float64:
		p := asFloat64(param)

		return field.Float() <= p

	case reflect.Struct:

		// time.Time (and convertible types): must be now or earlier (UTC).
		if field.Type().ConvertibleTo(timeType) {
			now := time.Now().UTC()
			t := getValue(field.Convert(timeType)).(time.Time)

			return t.Before(now) || t.Equal(now)
		}
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isLt is the validation function for validating if the current field's value is less than the param's value.
func isLt(fl FieldLevel) bool {
	field := fl.Field()
	param := fl.Param()

	switch field.Kind() {
	case reflect.String:
		// Strings compare rune count (not byte length) against the param.
		p := asInt(param)

		return int64(utf8.RuneCountInString(field.String())) < p

	case reflect.Slice, reflect.Map, reflect.Array:
		p := asInt(param)

		return int64(field.Len()) < p

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asIntFromType(field.Type(), param)

		return field.Int() < p

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)

		return field.Uint() < p

	case reflect.Float32:
		p := asFloat32(param)

		return field.Float() < p

	case reflect.Float64:
		p := asFloat64(param)

		return field.Float() < p

	case reflect.Struct:

		// time.Time (and convertible types): must be strictly in the past (UTC).
		if field.Type().ConvertibleTo(timeType) {
			return getValue(field.Convert(timeType)).(time.Time).Before(time.Now().UTC())
		}
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// hasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value.
func hasMaxOf(fl FieldLevel) bool {
	return isLte(fl)
}

// isTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address.
func isTCP4AddrResolvable(fl FieldLevel) bool {
	// Pre-check the syntactic IPv4 form before attempting resolution.
	if !isIP4Addr(fl) {
		return false
	}

	_, err := net.ResolveTCPAddr("tcp4", fl.Field().String())
	return err == nil
}

// isTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address.
func isTCP6AddrResolvable(fl FieldLevel) bool {
	// Pre-check the syntactic IPv6 form before attempting resolution.
	if !isIP6Addr(fl) {
		return false
	}

	_, err := net.ResolveTCPAddr("tcp6", fl.Field().String())

	return err == nil
}

// isTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address.
func isTCPAddrResolvable(fl FieldLevel) bool {
	// Accept either address family, then let the resolver confirm.
	if !isIP4Addr(fl) && !isIP6Addr(fl) {
		return false
	}

	_, err := net.ResolveTCPAddr("tcp", fl.Field().String())

	return err == nil
}

// isUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address.
func isUDP4AddrResolvable(fl FieldLevel) bool {
	if !isIP4Addr(fl) {
		return false
	}

	_, err := net.ResolveUDPAddr("udp4", fl.Field().String())

	return err == nil
}

// isUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address.
func isUDP6AddrResolvable(fl FieldLevel) bool {
	if !isIP6Addr(fl) {
		return false
	}

	_, err := net.ResolveUDPAddr("udp6", fl.Field().String())

	return err == nil
}

// isUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address.
func isUDPAddrResolvable(fl FieldLevel) bool {
	if !isIP4Addr(fl) && !isIP6Addr(fl) {
		return false
	}

	_, err := net.ResolveUDPAddr("udp", fl.Field().String())

	return err == nil
}

// isIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address.
func isIP4AddrResolvable(fl FieldLevel) bool {
	if !isIPv4(fl) {
		return false
	}

	_, err := net.ResolveIPAddr("ip4", fl.Field().String())

	return err == nil
}

// isIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address.
func isIP6AddrResolvable(fl FieldLevel) bool {
	if !isIPv6(fl) {
		return false
	}

	_, err := net.ResolveIPAddr("ip6", fl.Field().String())

	return err == nil
}

// isIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address.
func isIPAddrResolvable(fl FieldLevel) bool {
	if !isIP(fl) {
		return false
	}

	_, err := net.ResolveIPAddr("ip", fl.Field().String())

	return err == nil
}

// isUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address.
func isUnixAddrResolvable(fl FieldLevel) bool {
	_, err := net.ResolveUnixAddr("unix", fl.Field().String())

	return err == nil
}

// isIP4Addr reports whether the field is an IPv4 address,
// optionally followed by a ":port" suffix which is stripped before parsing.
func isIP4Addr(fl FieldLevel) bool {
	val := fl.Field().String()

	// Strip a trailing ":port" (everything from the last colon on).
	if idx := strings.LastIndex(val, ":"); idx != -1 {
		val = val[0:idx]
	}

	ip := net.ParseIP(val)

	// To4() is non-nil only for addresses representable as IPv4.
	return ip != nil && ip.To4() != nil
}

// isIP6Addr reports whether the field is an IPv6 address,
// optionally in the bracketed "[addr]:port" form.
func isIP6Addr(fl FieldLevel) bool {
	val := fl.Field().String()

	if idx := strings.LastIndex(val, ":"); idx != -1 {
		// "[addr]:port" — unwrap the brackets and drop the port.
		if idx != 0 && val[idx-1:idx] == "]" {
			val = val[1 : idx-1]
		}
	}

	ip := net.ParseIP(val)

	// To4() == nil distinguishes a true IPv6 address from an IPv4 one.
	return ip != nil && ip.To4() == nil
}

// isHostnameRFC952 reports whether the field matches an RFC 952 hostname.
func isHostnameRFC952(fl FieldLevel) bool {
	return hostnameRegexRFC952().MatchString(fl.Field().String())
}

// isHostnameRFC1123 reports whether the field matches an RFC 1123 hostname.
func isHostnameRFC1123(fl FieldLevel) bool {
	return hostnameRegexRFC1123().MatchString(fl.Field().String())
}

// isFQDN reports whether the field is a fully qualified domain name (RFC 1123 form).
func isFQDN(fl FieldLevel) bool {
	val := fl.Field().String()

	if val == "" {
		return false
	}

	return fqdnRegexRFC1123().MatchString(val)
}

// isDir is the validation function for validating if the current field's value is a valid existing directory.
func isDir(fl FieldLevel) bool {
	field := fl.Field()

	if field.Kind() == reflect.String {
		fileInfo, err := os.Stat(field.String())
		if err != nil {
			return false
		}

		return fileInfo.IsDir()
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isDirPath is the validation function for validating if the current field's value is a valid directory.
func isDirPath(fl FieldLevel) bool {
	var exists bool
	var err error

	field := fl.Field()

	// If it exists, it obviously is valid.
	// This is done first to avoid code duplication and unnecessary additional logic.
	if exists = isDir(fl); exists {
		return true
	}

	// It does not exist but may still be a valid path.
	switch field.Kind() {
	case reflect.String:
		// Every OS allows for whitespace, but none
		// let you use a dir with no name (to my knowledge).
		// Unless you're dealing with raw inodes, but I digress.
		if strings.TrimSpace(field.String()) == "" {
			return false
		}
		if _, err = os.Stat(field.String()); err != nil {
			switch t := err.(type) {
			case *fs.PathError:
				if t.Err == syscall.EINVAL {
					// It's definitely an invalid character in the path.
					return false
				}
				// It could be a permission error, a does-not-exist error, etc.
				// Out-of-scope for this validation, though.
				// Lastly, we make sure it is a directory.
				if strings.HasSuffix(field.String(), string(os.PathSeparator)) {
					return true
				} else {
					return false
				}
			default:
				// Something went *seriously* wrong.
				/*
					Per https://pkg.go.dev/os#Stat:
					"If there is an error, it will be of type *PathError."
				*/
				panic(err)
			}
		}
		// We repeat the check here to make sure it is an explicit directory in case the above os.Stat didn't trigger an error.
		if strings.HasSuffix(field.String(), string(os.PathSeparator)) {
			return true
		} else {
			return false
		}
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isJSON is the validation function for validating if the current field's value is a valid json string.
func isJSON(fl FieldLevel) bool {
	field := fl.Field()

	switch field.Kind() {
	case reflect.String:
		val := field.String()
		return json.Valid([]byte(val))
	case reflect.Slice:
		fieldType := field.Type()

		// Also accept any slice type convertible to []byte (e.g. json.RawMessage).
		if fieldType.ConvertibleTo(byteSliceType) {
			b := getValue(field.Convert(byteSliceType)).([]byte)
			return json.Valid(b)
		}
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isJWT is the validation function for validating if the current field's value is a valid JWT string.
func isJWT(fl FieldLevel) bool {
	return jWTRegex().MatchString(fl.Field().String())
}

// isHostnamePort validates a : combination for fields typically used for socket address.
func isHostnamePort(fl FieldLevel) bool {
	val := fl.Field().String()
	host, port, err := net.SplitHostPort(val)
	if err != nil {
		return false
	}
	// Port must be an int in 1..65535.
	if portNum, err := strconv.ParseInt(
		port, 10, 32,
	); err != nil || portNum > 65535 || portNum < 1 {
		return false
	}

	// If host is specified, it should match a DNS name
	if host != "" {
		return hostnameRegexRFC1123().MatchString(host)
	}
	return true
}

// IsPort validates if the current field's value represents a valid port
func isPort(fl FieldLevel) bool {
	val := fl.Field().Uint()

	return val >= 1 && val <= 65535
}

// isLowercase is the validation function for validating if the current field's value is a lowercase string.
func isLowercase(fl FieldLevel) bool {
	field := fl.Field()

	if field.Kind() == reflect.String {
		// An empty string is not considered lowercase.
		if field.String() == "" {
			return false
		}
		return field.String() == strings.ToLower(field.String())
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isUppercase is the validation function for validating if the current field's value is an uppercase string.
func isUppercase(fl FieldLevel) bool {
	field := fl.Field()

	if field.Kind() == reflect.String {
		// An empty string is not considered uppercase.
		if field.String() == "" {
			return false
		}
		return field.String() == strings.ToUpper(field.String())
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isDatetime is the validation function for validating if the current field's value is a valid datetime string.
// The tag param is a Go reference-time layout (e.g. "2006-01-02").
func isDatetime(fl FieldLevel) bool {
	field := fl.Field()
	param := fl.Param()

	if field.Kind() == reflect.String {
		_, err := time.Parse(param, field.String())

		return err == nil
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isTimeZone is the validation function for validating if the current field's value is a valid time zone string.
func isTimeZone(fl FieldLevel) bool {
	field := fl.Field()

	if field.Kind() == reflect.String {
		// empty value is converted to UTC by time.LoadLocation but disallow it as it is not a valid time zone name
		if field.String() == "" {
			return false
		}

		// Local value is converted to the current system time zone by time.LoadLocation but disallow it as it is not a valid time zone name
		if strings.ToLower(field.String()) == "local" {
			return false
		}

		_, err := time.LoadLocation(field.String())
		return err == nil
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 country code.
func isIso3166Alpha2(fl FieldLevel) bool {
	_, ok := iso3166_1_alpha2[fl.Field().String()]
	return ok
}

// isIso3166Alpha2EU is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 European Union country code.
func isIso3166Alpha2EU(fl FieldLevel) bool {
	_, ok := iso3166_1_alpha2_eu[fl.Field().String()]
	return ok
}

// isIso3166Alpha3 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code.
func isIso3166Alpha3(fl FieldLevel) bool {
	_, ok := iso3166_1_alpha3[fl.Field().String()]
	return ok
}

// isIso3166Alpha3EU is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 European Union country code.
func isIso3166Alpha3EU(fl FieldLevel) bool {
	_, ok := iso3166_1_alpha3_eu[fl.Field().String()]
	return ok
}

// isIso3166AlphaNumeric is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code.
func isIso3166AlphaNumeric(fl FieldLevel) bool {
	field := fl.Field()

	var code int
	switch field.Kind() {
	case reflect.String:
		i, err := strconv.Atoi(field.String())
		if err != nil {
			return false
		}
		// ISO 3166-1 numeric codes are three digits; mod 1000 normalizes
		// the lookup key regardless of extra leading digits.
		code = i % 1000
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		code = int(field.Int() % 1000)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		code = int(field.Uint() % 1000)
	default:
		panic(fmt.Sprintf("Bad field type %s", field.Type()))
	}

	_, ok := iso3166_1_alpha_numeric[code]
	return ok
}

// isIso3166AlphaNumericEU is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric European Union country code.
func isIso3166AlphaNumericEU(fl FieldLevel) bool {
	field := fl.Field()

	var code int
	switch field.Kind() {
	case reflect.String:
		i, err := strconv.Atoi(field.String())
		if err != nil {
			return false
		}
		// ISO 3166-1 numeric codes are three digits; mod 1000 normalizes
		// the lookup key regardless of extra leading digits.
		code = i % 1000
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		code = int(field.Int() % 1000)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		code = int(field.Uint() % 1000)
	default:
		panic(fmt.Sprintf("Bad field type %s", field.Type()))
	}

	_, ok := iso3166_1_alpha_numeric_eu[code]
	return ok
}

// isIso31662 is the validation function for validating if the current field's value is a valid iso3166-2 code.
func isIso31662(fl FieldLevel) bool {
	_, ok := iso3166_2[fl.Field().String()]
	return ok
}

// isIso4217 is the validation function for validating if the current field's value is a valid iso4217 currency code.
func isIso4217(fl FieldLevel) bool {
	_, ok := iso4217[fl.Field().String()]
	return ok
}

// isIso4217Numeric is the validation function for validating if the current field's value is a valid iso4217 numeric currency code.
func isIso4217Numeric(fl FieldLevel) bool {
	field := fl.Field()

	var code int
	switch field.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		code = int(field.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		code = int(field.Uint())
	default:
		panic(fmt.Sprintf("Bad field type %s", field.Type()))
	}

	_, ok := iso4217_numeric[code]
	return ok
}

// isBCP47LanguageTag is the validation function for validating if the current field's value is a valid BCP 47 language tag, as parsed by language.Parse
func isBCP47LanguageTag(fl FieldLevel) bool {
	field := fl.Field()

	if field.Kind() == reflect.String {
		_, err := language.Parse(field.String())
		return err == nil
	}

	panic(fmt.Sprintf("Bad field type %s", field.Type()))
}

// isIsoBicFormat is the validation function for validating if the current field's value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362
func isIsoBicFormat(fl FieldLevel) bool {
	bicString := fl.Field().String()

	return bicRegex().MatchString(bicString)
}

// isSemverFormat is the validation function for validating if the current field's value is a valid semver version, defined in Semantic Versioning 2.0.0
func isSemverFormat(fl FieldLevel) bool {
	semverString := fl.Field().String()

	return semverRegex().MatchString(semverString)
}

// isCveFormat is the validation function for validating if the current field's value is a valid cve id, defined in CVE mitre org
func isCveFormat(fl FieldLevel) bool {
	cveString := fl.Field().String()

	return cveRegex().MatchString(cveString)
}

// isDnsRFC1035LabelFormat is the validation function
// for validating if the current field's value is
// a valid dns RFC 1035 label, defined in RFC 1035.
func isDnsRFC1035LabelFormat(fl FieldLevel) bool {
	val := fl.Field().String()

	// RFC 1035 limits a label to 63 octets; check the length up front so the
	// regex only has to validate the character pattern.
	size := len(val)
	if size > 63 {
		return false
	}

	return dnsRegexRFC1035Label().MatchString(val)
}

// digitsHaveLuhnChecksum returns true if and only if the last element of the given digits slice is the Luhn checksum of the previous elements
func digitsHaveLuhnChecksum(digits []string) bool {
	size := len(digits)
	sum := 0
	for i, digit := range digits {
		value, err := strconv.Atoi(digit)
		if err != nil {
			return false
		}
		// The parity test doubles every second digit counting leftward from
		// the digit just before the checksum (the standard Luhn doubling).
		if size%2 == 0 && i%2 == 0 || size%2 == 1 && i%2 == 1 {
			v := value * 2
			if v >= 10 {
				// Doubling past 9 contributes the digit sum (1 + v%10).
				sum += 1 + (v % 10)
			} else {
				sum += v
			}
		} else {
			sum += value
		}
	}
	return (sum % 10) == 0
}

// isMongoDBObjectId is the validation function for validating if the current field's value is valid MongoDB ObjectID
func isMongoDBObjectId(fl FieldLevel) bool {
	val := fl.Field().String()
	return mongodbIdRegex().MatchString(val)
}

// isMongoDBConnectionString is the validation function for validating if the current field's value is valid MongoDB Connection String
func isMongoDBConnectionString(fl FieldLevel) bool {
	val := fl.Field().String()
	return mongodbConnectionRegex().MatchString(val)
}

// isSpiceDB is the validation function for validating if the current field's value is valid for use with Authzed SpiceDB in the indicated way
func isSpiceDB(fl FieldLevel) bool {
	val := fl.Field().String()
	param := fl.Param()

	switch param {
	case "permission":
		return spicedbPermissionRegex().MatchString(val)
	case "type":
		return spicedbTypeRegex().MatchString(val)
	case "id", "":
		// An empty param defaults to validating an object ID.
		return spicedbIDRegex().MatchString(val)
	}

	panic("Unrecognized parameter: " + param)
}

// isCreditCard is the validation function for validating if the current field's value is a valid credit card number
func isCreditCard(fl FieldLevel) bool {
	val := fl.Field().String()
	var creditCard bytes.Buffer
	// Card numbers may be written in space-separated groups; each group must
	// have at least 3 digits.
	segments := strings.Split(val, " ")
	for _, segment := range segments {
		if len(segment) < 3 {
			return false
		}
		creditCard.WriteString(segment)
	}

	ccDigits := strings.Split(creditCard.String(), "")
	size := len(ccDigits)
	// Valid card numbers are 12-19 digits long.
	if size < 12 || size > 19 {
		return false
	}

	return digitsHaveLuhnChecksum(ccDigits)
}

// hasLuhnChecksum is the validation for validating if the current field's value has a valid Luhn checksum
func hasLuhnChecksum(fl FieldLevel) bool {
	field := fl.Field()
	var str string // convert to a string which will then be split into single digits; easier and more readable than shifting/extracting single digits from a number
	switch field.Kind() {
	case reflect.String:
		str = field.String()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		str = strconv.FormatInt(field.Int(), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		str = strconv.FormatUint(field.Uint(), 10)
	default:
		panic(fmt.Sprintf("Bad field type %s", field.Type()))
	}
	size := len(str)
	if size < 2 { // there has to be at least one digit that carries a meaning + the checksum
		return false
	}
	digits := strings.Split(str, "")
	return digitsHaveLuhnChecksum(digits)
}

// isCron is the validation function for validating if the current field's value is a valid cron expression
func isCron(fl FieldLevel) bool {
	cronString := fl.Field().String()
	return cronRegex().MatchString(cronString)
}

// isEIN is the validation function for validating if the current field's value is a valid U.S.
Employer Identification Number (EIN) +func isEIN(fl FieldLevel) bool { + field := fl.Field() + + if field.Len() != 10 { + return false + } + + return einRegex().MatchString(field.String()) +} + +func isValidateFn(fl FieldLevel) bool { + const defaultParam = `Validate` + + field := fl.Field() + validateFn := cmp.Or(fl.Param(), defaultParam) + + ok, err := tryCallValidateFn(field, validateFn) + if err != nil { + return false + } + + return ok +} + +var ( + errMethodNotFound = errors.New(`method not found`) + errMethodReturnNoValues = errors.New(`method return o values (void)`) + errMethodReturnInvalidType = errors.New(`method should return invalid type`) +) + +func tryCallValidateFn(field reflect.Value, validateFn string) (bool, error) { + method := field.MethodByName(validateFn) + if field.CanAddr() && !method.IsValid() { + method = field.Addr().MethodByName(validateFn) + } + + if !method.IsValid() { + return false, fmt.Errorf("unable to call %q on type %q: %w", + validateFn, field.Type().String(), errMethodNotFound) + } + + returnValues := method.Call([]reflect.Value{}) + if len(returnValues) == 0 { + return false, fmt.Errorf("unable to use result of method %q on type %q: %w", + validateFn, field.Type().String(), errMethodReturnNoValues) + } + + firstReturnValue := returnValues[0] + + switch firstReturnValue.Kind() { + case reflect.Bool: + return firstReturnValue.Bool(), nil + case reflect.Interface: + errorType := reflect.TypeOf((*error)(nil)).Elem() + + if firstReturnValue.Type().Implements(errorType) { + return firstReturnValue.IsNil(), nil + } + + return false, fmt.Errorf("unable to use result of method %q on type %q: %w (got interface %v expect error)", + validateFn, field.Type().String(), errMethodReturnInvalidType, firstReturnValue.Type().String()) + default: + return false, fmt.Errorf("unable to use result of method %q on type %q: %w (got %v expect error or bool)", + validateFn, field.Type().String(), errMethodReturnInvalidType, 
firstReturnValue.Type().String()) + } +} diff --git a/vendor/github.com/go-playground/validator/v10/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go new file mode 100644 index 0000000000..fb101b064f --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/cache.go @@ -0,0 +1,326 @@ +package validator + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type tagType uint8 + +const ( + typeDefault tagType = iota + typeOmitEmpty + typeIsDefault + typeNoStructLevel + typeStructOnly + typeDive + typeOr + typeKeys + typeEndKeys + typeOmitNil + typeOmitZero +) + +const ( + invalidValidation = "Invalid validation tag on field '%s'" + undefinedValidation = "Undefined validation function '%s' on field '%s'" + keysTagNotDefined = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag" +) + +type structCache struct { + lock sync.Mutex + m atomic.Value // map[reflect.Type]*cStruct +} + +func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) { + c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key] + return +} + +func (sc *structCache) Set(key reflect.Type, value *cStruct) { + m := sc.m.Load().(map[reflect.Type]*cStruct) + nm := make(map[reflect.Type]*cStruct, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + sc.m.Store(nm) +} + +type tagCache struct { + lock sync.Mutex + m atomic.Value // map[string]*cTag +} + +func (tc *tagCache) Get(key string) (c *cTag, found bool) { + c, found = tc.m.Load().(map[string]*cTag)[key] + return +} + +func (tc *tagCache) Set(key string, value *cTag) { + m := tc.m.Load().(map[string]*cTag) + nm := make(map[string]*cTag, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + tc.m.Store(nm) +} + +type cStruct struct { + name string + fields []*cField + fn StructLevelFuncCtx +} + +type cField struct { + idx int + name string + altName string + namesEqual bool + cTags *cTag +} + +type cTag struct { + tag string + aliasTag 
string + actualAliasTag string + param string + keys *cTag // only populated when using tag's 'keys' and 'endkeys' for map key validation + next *cTag + fn FuncCtx + typeof tagType + hasTag bool + hasAlias bool + hasParam bool // true if parameter used eg. eq= where the equal sign has been set + isBlockEnd bool // indicates the current tag represents the last validation in the block + runValidationWhenNil bool +} + +func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct { + v.structCache.lock.Lock() + defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise! + + typ := current.Type() + + // could have been multiple trying to access, but once first is done this ensures struct + // isn't parsed again. + cs, ok := v.structCache.Get(typ) + if ok { + return cs + } + + cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]} + + numFields := current.NumField() + rules := v.rules[typ] + + var ctag *cTag + var fld reflect.StructField + var tag string + var customName string + + for i := 0; i < numFields; i++ { + fld = typ.Field(i) + + if !v.privateFieldValidation && !fld.Anonymous && len(fld.PkgPath) > 0 { + continue + } + + if rtag, ok := rules[fld.Name]; ok { + tag = rtag + } else { + tag = fld.Tag.Get(v.tagName) + } + + if tag == skipValidationTag { + continue + } + + customName = fld.Name + + if v.hasTagNameFunc { + name := v.tagNameFunc(fld) + if len(name) > 0 { + customName = name + } + } + + // NOTE: cannot use shared tag cache, because tags may be equal, but things like alias may be different + // and so only struct level caching can be used instead of combined with Field tag caching + + if len(tag) > 0 { + ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false) + } else { + // even if field doesn't have validations need cTag for traversing to potential inner/nested + // elements of the field. 
+ ctag = new(cTag) + } + + cs.fields = append(cs.fields, &cField{ + idx: i, + name: fld.Name, + altName: customName, + cTags: ctag, + namesEqual: fld.Name == customName, + }) + } + v.structCache.Set(typ, cs) + return cs +} + +func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) { + var t string + noAlias := len(alias) == 0 + tags := strings.Split(tag, tagSeparator) + + for i := 0; i < len(tags); i++ { + t = tags[i] + if noAlias { + alias = t + } + + // check map for alias and process new tags, otherwise process as usual + if tagsVal, found := v.aliases[t]; found { + if i == 0 { + firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + } else { + next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + current.next, current = next, curr + } + continue + } + + var prevTag tagType + + if i == 0 { + current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true, typeof: typeDefault} + firstCtag = current + } else { + prevTag = current.typeof + current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true} + current = current.next + } + + switch t { + case diveTag: + current.typeof = typeDive + + case keysTag: + current.typeof = typeKeys + + if i == 0 || prevTag != typeDive { + panic(fmt.Sprintf("'%s' tag must be immediately preceded by the '%s' tag", keysTag, diveTag)) + } + + // need to pass along only keys tag + // need to increment i to skip over the keys tags + b := make([]byte, 0, 64) + + i++ + + for ; i < len(tags); i++ { + b = append(b, tags[i]...) 
+ b = append(b, ',') + + if tags[i] == endKeysTag { + break + } + } + + current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false) + + case endKeysTag: + current.typeof = typeEndKeys + + // if there are more in tags then there was no keysTag defined + // and an error should be thrown + if i != len(tags)-1 { + panic(keysTagNotDefined) + } + return + + case omitzero: + current.typeof = typeOmitZero + continue + + case omitempty: + current.typeof = typeOmitEmpty + + case omitnil: + current.typeof = typeOmitNil + + case structOnlyTag: + current.typeof = typeStructOnly + + case noStructLevelTag: + current.typeof = typeNoStructLevel + + default: + if t == isdefault { + current.typeof = typeIsDefault + } + // if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C" + orVals := strings.Split(t, orSeparator) + + for j := 0; j < len(orVals); j++ { + vals := strings.SplitN(orVals[j], tagKeySeparator, 2) + if noAlias { + alias = vals[0] + current.aliasTag = alias + } else { + current.actualAliasTag = t + } + + if j > 0 { + current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true} + current = current.next + } + current.hasParam = len(vals) > 1 + + current.tag = vals[0] + if len(current.tag) == 0 { + panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName))) + } + + if wrapper, ok := v.validations[current.tag]; ok { + current.fn = wrapper.fn + current.runValidationWhenNil = wrapper.runValidationOnNil + } else { + panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName))) + } + + if len(orVals) > 1 { + current.typeof = typeOr + } + + if len(vals) > 1 { + current.param = strings.ReplaceAll(strings.ReplaceAll(vals[1], utf8HexComma, ","), utf8Pipe, "|") + } + } + current.isBlockEnd = true + } + } + return +} + +func (v *Validate) fetchCacheTag(tag string) *cTag { + // find cached tag + ctag, found := v.tagCache.Get(tag) + if !found 
{ + v.tagCache.lock.Lock() + defer v.tagCache.lock.Unlock() + + // could have been multiple trying to access, but once first is done this ensures tag + // isn't parsed again. + ctag, found = v.tagCache.Get(tag) + if !found { + ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false) + v.tagCache.Set(tag, ctag) + } + } + return ctag +} diff --git a/vendor/github.com/go-playground/validator/v10/country_codes.go b/vendor/github.com/go-playground/validator/v10/country_codes.go new file mode 100644 index 0000000000..b5f10d3c11 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/country_codes.go @@ -0,0 +1,1177 @@ +package validator + +var iso3166_1_alpha2 = map[string]struct{}{ + // see: https://www.iso.org/iso-3166-country-codes.html + "AF": {}, "AX": {}, "AL": {}, "DZ": {}, "AS": {}, + "AD": {}, "AO": {}, "AI": {}, "AQ": {}, "AG": {}, + "AR": {}, "AM": {}, "AW": {}, "AU": {}, "AT": {}, + "AZ": {}, "BS": {}, "BH": {}, "BD": {}, "BB": {}, + "BY": {}, "BE": {}, "BZ": {}, "BJ": {}, "BM": {}, + "BT": {}, "BO": {}, "BQ": {}, "BA": {}, "BW": {}, + "BV": {}, "BR": {}, "IO": {}, "BN": {}, "BG": {}, + "BF": {}, "BI": {}, "KH": {}, "CM": {}, "CA": {}, + "CV": {}, "KY": {}, "CF": {}, "TD": {}, "CL": {}, + "CN": {}, "CX": {}, "CC": {}, "CO": {}, "KM": {}, + "CG": {}, "CD": {}, "CK": {}, "CR": {}, "CI": {}, + "HR": {}, "CU": {}, "CW": {}, "CY": {}, "CZ": {}, + "DK": {}, "DJ": {}, "DM": {}, "DO": {}, "EC": {}, + "EG": {}, "SV": {}, "GQ": {}, "ER": {}, "EE": {}, + "ET": {}, "FK": {}, "FO": {}, "FJ": {}, "FI": {}, + "FR": {}, "GF": {}, "PF": {}, "TF": {}, "GA": {}, + "GM": {}, "GE": {}, "DE": {}, "GH": {}, "GI": {}, + "GR": {}, "GL": {}, "GD": {}, "GP": {}, "GU": {}, + "GT": {}, "GG": {}, "GN": {}, "GW": {}, "GY": {}, + "HT": {}, "HM": {}, "VA": {}, "HN": {}, "HK": {}, + "HU": {}, "IS": {}, "IN": {}, "ID": {}, "IR": {}, + "IQ": {}, "IE": {}, "IM": {}, "IL": {}, "IT": {}, + "JM": {}, "JP": {}, "JE": {}, "JO": {}, "KZ": {}, + "KE": {}, "KI": {}, "KP": {}, "KR": {}, "KW": 
{}, + "KG": {}, "LA": {}, "LV": {}, "LB": {}, "LS": {}, + "LR": {}, "LY": {}, "LI": {}, "LT": {}, "LU": {}, + "MO": {}, "MK": {}, "MG": {}, "MW": {}, "MY": {}, + "MV": {}, "ML": {}, "MT": {}, "MH": {}, "MQ": {}, + "MR": {}, "MU": {}, "YT": {}, "MX": {}, "FM": {}, + "MD": {}, "MC": {}, "MN": {}, "ME": {}, "MS": {}, + "MA": {}, "MZ": {}, "MM": {}, "NA": {}, "NR": {}, + "NP": {}, "NL": {}, "NC": {}, "NZ": {}, "NI": {}, + "NE": {}, "NG": {}, "NU": {}, "NF": {}, "MP": {}, + "NO": {}, "OM": {}, "PK": {}, "PW": {}, "PS": {}, + "PA": {}, "PG": {}, "PY": {}, "PE": {}, "PH": {}, + "PN": {}, "PL": {}, "PT": {}, "PR": {}, "QA": {}, + "RE": {}, "RO": {}, "RU": {}, "RW": {}, "BL": {}, + "SH": {}, "KN": {}, "LC": {}, "MF": {}, "PM": {}, + "VC": {}, "WS": {}, "SM": {}, "ST": {}, "SA": {}, + "SN": {}, "RS": {}, "SC": {}, "SL": {}, "SG": {}, + "SX": {}, "SK": {}, "SI": {}, "SB": {}, "SO": {}, + "ZA": {}, "GS": {}, "SS": {}, "ES": {}, "LK": {}, + "SD": {}, "SR": {}, "SJ": {}, "SZ": {}, "SE": {}, + "CH": {}, "SY": {}, "TW": {}, "TJ": {}, "TZ": {}, + "TH": {}, "TL": {}, "TG": {}, "TK": {}, "TO": {}, + "TT": {}, "TN": {}, "TR": {}, "TM": {}, "TC": {}, + "TV": {}, "UG": {}, "UA": {}, "AE": {}, "GB": {}, + "US": {}, "UM": {}, "UY": {}, "UZ": {}, "VU": {}, + "VE": {}, "VN": {}, "VG": {}, "VI": {}, "WF": {}, + "EH": {}, "YE": {}, "ZM": {}, "ZW": {}, "XK": {}, +} + +var iso3166_1_alpha2_eu = map[string]struct{}{ + "AT": {}, "BE": {}, "BG": {}, "HR": {}, "CY": {}, + "CZ": {}, "DK": {}, "EE": {}, "FI": {}, "FR": {}, + "DE": {}, "GR": {}, "HU": {}, "IE": {}, "IT": {}, + "LV": {}, "LT": {}, "LU": {}, "MT": {}, "NL": {}, + "PL": {}, "PT": {}, "RO": {}, "SK": {}, "SI": {}, + "ES": {}, "SE": {}, +} + +var iso3166_1_alpha3 = map[string]struct{}{ + // see: https://www.iso.org/iso-3166-country-codes.html + "AFG": {}, "ALB": {}, "DZA": {}, "ASM": {}, "AND": {}, + "AGO": {}, "AIA": {}, "ATA": {}, "ATG": {}, "ARG": {}, + "ARM": {}, "ABW": {}, "AUS": {}, "AUT": {}, "AZE": {}, + "BHS": {}, "BHR": {}, 
"BGD": {}, "BRB": {}, "BLR": {}, + "BEL": {}, "BLZ": {}, "BEN": {}, "BMU": {}, "BTN": {}, + "BOL": {}, "BES": {}, "BIH": {}, "BWA": {}, "BVT": {}, + "BRA": {}, "IOT": {}, "BRN": {}, "BGR": {}, "BFA": {}, + "BDI": {}, "CPV": {}, "KHM": {}, "CMR": {}, "CAN": {}, + "CYM": {}, "CAF": {}, "TCD": {}, "CHL": {}, "CHN": {}, + "CXR": {}, "CCK": {}, "COL": {}, "COM": {}, "COD": {}, + "COG": {}, "COK": {}, "CRI": {}, "HRV": {}, "CUB": {}, + "CUW": {}, "CYP": {}, "CZE": {}, "CIV": {}, "DNK": {}, + "DJI": {}, "DMA": {}, "DOM": {}, "ECU": {}, "EGY": {}, + "SLV": {}, "GNQ": {}, "ERI": {}, "EST": {}, "SWZ": {}, + "ETH": {}, "FLK": {}, "FRO": {}, "FJI": {}, "FIN": {}, + "FRA": {}, "GUF": {}, "PYF": {}, "ATF": {}, "GAB": {}, + "GMB": {}, "GEO": {}, "DEU": {}, "GHA": {}, "GIB": {}, + "GRC": {}, "GRL": {}, "GRD": {}, "GLP": {}, "GUM": {}, + "GTM": {}, "GGY": {}, "GIN": {}, "GNB": {}, "GUY": {}, + "HTI": {}, "HMD": {}, "VAT": {}, "HND": {}, "HKG": {}, + "HUN": {}, "ISL": {}, "IND": {}, "IDN": {}, "IRN": {}, + "IRQ": {}, "IRL": {}, "IMN": {}, "ISR": {}, "ITA": {}, + "JAM": {}, "JPN": {}, "JEY": {}, "JOR": {}, "KAZ": {}, + "KEN": {}, "KIR": {}, "PRK": {}, "KOR": {}, "KWT": {}, + "KGZ": {}, "LAO": {}, "LVA": {}, "LBN": {}, "LSO": {}, + "LBR": {}, "LBY": {}, "LIE": {}, "LTU": {}, "LUX": {}, + "MAC": {}, "MDG": {}, "MWI": {}, "MYS": {}, "MDV": {}, + "MLI": {}, "MLT": {}, "MHL": {}, "MTQ": {}, "MRT": {}, + "MUS": {}, "MYT": {}, "MEX": {}, "FSM": {}, "MDA": {}, + "MCO": {}, "MNG": {}, "MNE": {}, "MSR": {}, "MAR": {}, + "MOZ": {}, "MMR": {}, "NAM": {}, "NRU": {}, "NPL": {}, + "NLD": {}, "NCL": {}, "NZL": {}, "NIC": {}, "NER": {}, + "NGA": {}, "NIU": {}, "NFK": {}, "MKD": {}, "MNP": {}, + "NOR": {}, "OMN": {}, "PAK": {}, "PLW": {}, "PSE": {}, + "PAN": {}, "PNG": {}, "PRY": {}, "PER": {}, "PHL": {}, + "PCN": {}, "POL": {}, "PRT": {}, "PRI": {}, "QAT": {}, + "ROU": {}, "RUS": {}, "RWA": {}, "REU": {}, "BLM": {}, + "SHN": {}, "KNA": {}, "LCA": {}, "MAF": {}, "SPM": {}, + "VCT": {}, "WSM": {}, 
"SMR": {}, "STP": {}, "SAU": {}, + "SEN": {}, "SRB": {}, "SYC": {}, "SLE": {}, "SGP": {}, + "SXM": {}, "SVK": {}, "SVN": {}, "SLB": {}, "SOM": {}, + "ZAF": {}, "SGS": {}, "SSD": {}, "ESP": {}, "LKA": {}, + "SDN": {}, "SUR": {}, "SJM": {}, "SWE": {}, "CHE": {}, + "SYR": {}, "TWN": {}, "TJK": {}, "TZA": {}, "THA": {}, + "TLS": {}, "TGO": {}, "TKL": {}, "TON": {}, "TTO": {}, + "TUN": {}, "TUR": {}, "TKM": {}, "TCA": {}, "TUV": {}, + "UGA": {}, "UKR": {}, "ARE": {}, "GBR": {}, "UMI": {}, + "USA": {}, "URY": {}, "UZB": {}, "VUT": {}, "VEN": {}, + "VNM": {}, "VGB": {}, "VIR": {}, "WLF": {}, "ESH": {}, + "YEM": {}, "ZMB": {}, "ZWE": {}, "ALA": {}, "UNK": {}, +} + +var iso3166_1_alpha3_eu = map[string]struct{}{ + "AUT": {}, "BEL": {}, "BGR": {}, "HRV": {}, "CYP": {}, + "CZE": {}, "DNK": {}, "EST": {}, "FIN": {}, "FRA": {}, + "DEU": {}, "GRC": {}, "HUN": {}, "IRL": {}, "ITA": {}, + "LVA": {}, "LTU": {}, "LUX": {}, "MLT": {}, "NLD": {}, + "POL": {}, "PRT": {}, "ROU": {}, "SVK": {}, "SVN": {}, + "ESP": {}, "SWE": {}, +} +var iso3166_1_alpha_numeric = map[int]struct{}{ + // see: https://www.iso.org/iso-3166-country-codes.html + 4: {}, 8: {}, 12: {}, 16: {}, 20: {}, + 24: {}, 660: {}, 10: {}, 28: {}, 32: {}, + 51: {}, 533: {}, 36: {}, 40: {}, 31: {}, + 44: {}, 48: {}, 50: {}, 52: {}, 112: {}, + 56: {}, 84: {}, 204: {}, 60: {}, 64: {}, + 68: {}, 535: {}, 70: {}, 72: {}, 74: {}, + 76: {}, 86: {}, 96: {}, 100: {}, 854: {}, + 108: {}, 132: {}, 116: {}, 120: {}, 124: {}, + 136: {}, 140: {}, 148: {}, 152: {}, 156: {}, + 162: {}, 166: {}, 170: {}, 174: {}, 180: {}, + 178: {}, 184: {}, 188: {}, 191: {}, 192: {}, + 531: {}, 196: {}, 203: {}, 384: {}, 208: {}, + 262: {}, 212: {}, 214: {}, 218: {}, 818: {}, + 222: {}, 226: {}, 232: {}, 233: {}, 748: {}, + 231: {}, 238: {}, 234: {}, 242: {}, 246: {}, + 250: {}, 254: {}, 258: {}, 260: {}, 266: {}, + 270: {}, 268: {}, 276: {}, 288: {}, 292: {}, + 300: {}, 304: {}, 308: {}, 312: {}, 316: {}, + 320: {}, 831: {}, 324: {}, 624: {}, 328: {}, + 
332: {}, 334: {}, 336: {}, 340: {}, 344: {}, + 348: {}, 352: {}, 356: {}, 360: {}, 364: {}, + 368: {}, 372: {}, 833: {}, 376: {}, 380: {}, + 388: {}, 392: {}, 832: {}, 400: {}, 398: {}, + 404: {}, 296: {}, 408: {}, 410: {}, 414: {}, + 417: {}, 418: {}, 428: {}, 422: {}, 426: {}, + 430: {}, 434: {}, 438: {}, 440: {}, 442: {}, + 446: {}, 450: {}, 454: {}, 458: {}, 462: {}, + 466: {}, 470: {}, 584: {}, 474: {}, 478: {}, + 480: {}, 175: {}, 484: {}, 583: {}, 498: {}, + 492: {}, 496: {}, 499: {}, 500: {}, 504: {}, + 508: {}, 104: {}, 516: {}, 520: {}, 524: {}, + 528: {}, 540: {}, 554: {}, 558: {}, 562: {}, + 566: {}, 570: {}, 574: {}, 807: {}, 580: {}, + 578: {}, 512: {}, 586: {}, 585: {}, 275: {}, + 591: {}, 598: {}, 600: {}, 604: {}, 608: {}, + 612: {}, 616: {}, 620: {}, 630: {}, 634: {}, + 642: {}, 643: {}, 646: {}, 638: {}, 652: {}, + 654: {}, 659: {}, 662: {}, 663: {}, 666: {}, + 670: {}, 882: {}, 674: {}, 678: {}, 682: {}, + 686: {}, 688: {}, 690: {}, 694: {}, 702: {}, + 534: {}, 703: {}, 705: {}, 90: {}, 706: {}, + 710: {}, 239: {}, 728: {}, 724: {}, 144: {}, + 729: {}, 740: {}, 744: {}, 752: {}, 756: {}, + 760: {}, 158: {}, 762: {}, 834: {}, 764: {}, + 626: {}, 768: {}, 772: {}, 776: {}, 780: {}, + 788: {}, 792: {}, 795: {}, 796: {}, 798: {}, + 800: {}, 804: {}, 784: {}, 826: {}, 581: {}, + 840: {}, 858: {}, 860: {}, 548: {}, 862: {}, + 704: {}, 92: {}, 850: {}, 876: {}, 732: {}, + 887: {}, 894: {}, 716: {}, 248: {}, 153: {}, +} + +var iso3166_1_alpha_numeric_eu = map[int]struct{}{ + 40: {}, 56: {}, 100: {}, 191: {}, 196: {}, + 200: {}, 208: {}, 233: {}, 246: {}, 250: {}, + 276: {}, 300: {}, 348: {}, 372: {}, 380: {}, + 428: {}, 440: {}, 442: {}, 470: {}, 528: {}, + 616: {}, 620: {}, 642: {}, 703: {}, 705: {}, + 724: {}, 752: {}, +} + +var iso3166_2 = map[string]struct{}{ + "AD-02": {}, "AD-03": {}, "AD-04": {}, "AD-05": {}, "AD-06": {}, + "AD-07": {}, "AD-08": {}, "AE-AJ": {}, "AE-AZ": {}, "AE-DU": {}, + "AE-FU": {}, "AE-RK": {}, "AE-SH": {}, "AE-UQ": {}, 
"AF-BAL": {}, + "AF-BAM": {}, "AF-BDG": {}, "AF-BDS": {}, "AF-BGL": {}, "AF-DAY": {}, + "AF-FRA": {}, "AF-FYB": {}, "AF-GHA": {}, "AF-GHO": {}, "AF-HEL": {}, + "AF-HER": {}, "AF-JOW": {}, "AF-KAB": {}, "AF-KAN": {}, "AF-KAP": {}, + "AF-KDZ": {}, "AF-KHO": {}, "AF-KNR": {}, "AF-LAG": {}, "AF-LOG": {}, + "AF-NAN": {}, "AF-NIM": {}, "AF-NUR": {}, "AF-PAN": {}, "AF-PAR": {}, + "AF-PIA": {}, "AF-PKA": {}, "AF-SAM": {}, "AF-SAR": {}, "AF-TAK": {}, + "AF-URU": {}, "AF-WAR": {}, "AF-ZAB": {}, "AG-03": {}, "AG-04": {}, + "AG-05": {}, "AG-06": {}, "AG-07": {}, "AG-08": {}, "AG-10": {}, + "AG-11": {}, "AL-01": {}, "AL-02": {}, "AL-03": {}, "AL-04": {}, + "AL-05": {}, "AL-06": {}, "AL-07": {}, "AL-08": {}, "AL-09": {}, + "AL-10": {}, "AL-11": {}, "AL-12": {}, "AL-BR": {}, "AL-BU": {}, + "AL-DI": {}, "AL-DL": {}, "AL-DR": {}, "AL-DV": {}, "AL-EL": {}, + "AL-ER": {}, "AL-FR": {}, "AL-GJ": {}, "AL-GR": {}, "AL-HA": {}, + "AL-KA": {}, "AL-KB": {}, "AL-KC": {}, "AL-KO": {}, "AL-KR": {}, + "AL-KU": {}, "AL-LB": {}, "AL-LE": {}, "AL-LU": {}, "AL-MK": {}, + "AL-MM": {}, "AL-MR": {}, "AL-MT": {}, "AL-PG": {}, "AL-PQ": {}, + "AL-PR": {}, "AL-PU": {}, "AL-SH": {}, "AL-SK": {}, "AL-SR": {}, + "AL-TE": {}, "AL-TP": {}, "AL-TR": {}, "AL-VL": {}, "AM-AG": {}, + "AM-AR": {}, "AM-AV": {}, "AM-ER": {}, "AM-GR": {}, "AM-KT": {}, + "AM-LO": {}, "AM-SH": {}, "AM-SU": {}, "AM-TV": {}, "AM-VD": {}, + "AO-BGO": {}, "AO-BGU": {}, "AO-BIE": {}, "AO-CAB": {}, "AO-CCU": {}, + "AO-CNN": {}, "AO-CNO": {}, "AO-CUS": {}, "AO-HUA": {}, "AO-HUI": {}, + "AO-LNO": {}, "AO-LSU": {}, "AO-LUA": {}, "AO-MAL": {}, "AO-MOX": {}, + "AO-NAM": {}, "AO-UIG": {}, "AO-ZAI": {}, "AR-A": {}, "AR-B": {}, + "AR-C": {}, "AR-D": {}, "AR-E": {}, "AR-F": {}, "AR-G": {}, "AR-H": {}, + "AR-J": {}, "AR-K": {}, "AR-L": {}, "AR-M": {}, "AR-N": {}, + "AR-P": {}, "AR-Q": {}, "AR-R": {}, "AR-S": {}, "AR-T": {}, + "AR-U": {}, "AR-V": {}, "AR-W": {}, "AR-X": {}, "AR-Y": {}, + "AR-Z": {}, "AT-1": {}, "AT-2": {}, "AT-3": {}, "AT-4": {}, + 
"AT-5": {}, "AT-6": {}, "AT-7": {}, "AT-8": {}, "AT-9": {}, + "AU-ACT": {}, "AU-NSW": {}, "AU-NT": {}, "AU-QLD": {}, "AU-SA": {}, + "AU-TAS": {}, "AU-VIC": {}, "AU-WA": {}, "AZ-ABS": {}, "AZ-AGA": {}, + "AZ-AGC": {}, "AZ-AGM": {}, "AZ-AGS": {}, "AZ-AGU": {}, "AZ-AST": {}, + "AZ-BA": {}, "AZ-BAB": {}, "AZ-BAL": {}, "AZ-BAR": {}, "AZ-BEY": {}, + "AZ-BIL": {}, "AZ-CAB": {}, "AZ-CAL": {}, "AZ-CUL": {}, "AZ-DAS": {}, + "AZ-FUZ": {}, "AZ-GA": {}, "AZ-GAD": {}, "AZ-GOR": {}, "AZ-GOY": {}, + "AZ-GYG": {}, "AZ-HAC": {}, "AZ-IMI": {}, "AZ-ISM": {}, "AZ-KAL": {}, + "AZ-KAN": {}, "AZ-KUR": {}, "AZ-LA": {}, "AZ-LAC": {}, "AZ-LAN": {}, + "AZ-LER": {}, "AZ-MAS": {}, "AZ-MI": {}, "AZ-NA": {}, "AZ-NEF": {}, + "AZ-NV": {}, "AZ-NX": {}, "AZ-OGU": {}, "AZ-ORD": {}, "AZ-QAB": {}, + "AZ-QAX": {}, "AZ-QAZ": {}, "AZ-QBA": {}, "AZ-QBI": {}, "AZ-QOB": {}, + "AZ-QUS": {}, "AZ-SA": {}, "AZ-SAB": {}, "AZ-SAD": {}, "AZ-SAH": {}, + "AZ-SAK": {}, "AZ-SAL": {}, "AZ-SAR": {}, "AZ-SAT": {}, "AZ-SBN": {}, + "AZ-SIY": {}, "AZ-SKR": {}, "AZ-SM": {}, "AZ-SMI": {}, "AZ-SMX": {}, + "AZ-SR": {}, "AZ-SUS": {}, "AZ-TAR": {}, "AZ-TOV": {}, "AZ-UCA": {}, + "AZ-XA": {}, "AZ-XAC": {}, "AZ-XCI": {}, "AZ-XIZ": {}, "AZ-XVD": {}, + "AZ-YAR": {}, "AZ-YE": {}, "AZ-YEV": {}, "AZ-ZAN": {}, "AZ-ZAQ": {}, + "AZ-ZAR": {}, "BA-01": {}, "BA-02": {}, "BA-03": {}, "BA-04": {}, + "BA-05": {}, "BA-06": {}, "BA-07": {}, "BA-08": {}, "BA-09": {}, + "BA-10": {}, "BA-BIH": {}, "BA-BRC": {}, "BA-SRP": {}, "BB-01": {}, + "BB-02": {}, "BB-03": {}, "BB-04": {}, "BB-05": {}, "BB-06": {}, + "BB-07": {}, "BB-08": {}, "BB-09": {}, "BB-10": {}, "BB-11": {}, + "BD-01": {}, "BD-02": {}, "BD-03": {}, "BD-04": {}, "BD-05": {}, + "BD-06": {}, "BD-07": {}, "BD-08": {}, "BD-09": {}, "BD-10": {}, + "BD-11": {}, "BD-12": {}, "BD-13": {}, "BD-14": {}, "BD-15": {}, + "BD-16": {}, "BD-17": {}, "BD-18": {}, "BD-19": {}, "BD-20": {}, + "BD-21": {}, "BD-22": {}, "BD-23": {}, "BD-24": {}, "BD-25": {}, + "BD-26": {}, "BD-27": {}, "BD-28": {}, "BD-29": {}, 
"BD-30": {}, + "BD-31": {}, "BD-32": {}, "BD-33": {}, "BD-34": {}, "BD-35": {}, + "BD-36": {}, "BD-37": {}, "BD-38": {}, "BD-39": {}, "BD-40": {}, + "BD-41": {}, "BD-42": {}, "BD-43": {}, "BD-44": {}, "BD-45": {}, + "BD-46": {}, "BD-47": {}, "BD-48": {}, "BD-49": {}, "BD-50": {}, + "BD-51": {}, "BD-52": {}, "BD-53": {}, "BD-54": {}, "BD-55": {}, + "BD-56": {}, "BD-57": {}, "BD-58": {}, "BD-59": {}, "BD-60": {}, + "BD-61": {}, "BD-62": {}, "BD-63": {}, "BD-64": {}, "BD-A": {}, + "BD-B": {}, "BD-C": {}, "BD-D": {}, "BD-E": {}, "BD-F": {}, + "BD-G": {}, "BE-BRU": {}, "BE-VAN": {}, "BE-VBR": {}, "BE-VLG": {}, + "BE-VLI": {}, "BE-VOV": {}, "BE-VWV": {}, "BE-WAL": {}, "BE-WBR": {}, + "BE-WHT": {}, "BE-WLG": {}, "BE-WLX": {}, "BE-WNA": {}, "BF-01": {}, + "BF-02": {}, "BF-03": {}, "BF-04": {}, "BF-05": {}, "BF-06": {}, + "BF-07": {}, "BF-08": {}, "BF-09": {}, "BF-10": {}, "BF-11": {}, + "BF-12": {}, "BF-13": {}, "BF-BAL": {}, "BF-BAM": {}, "BF-BAN": {}, + "BF-BAZ": {}, "BF-BGR": {}, "BF-BLG": {}, "BF-BLK": {}, "BF-COM": {}, + "BF-GAN": {}, "BF-GNA": {}, "BF-GOU": {}, "BF-HOU": {}, "BF-IOB": {}, + "BF-KAD": {}, "BF-KEN": {}, "BF-KMD": {}, "BF-KMP": {}, "BF-KOP": {}, + "BF-KOS": {}, "BF-KOT": {}, "BF-KOW": {}, "BF-LER": {}, "BF-LOR": {}, + "BF-MOU": {}, "BF-NAM": {}, "BF-NAO": {}, "BF-NAY": {}, "BF-NOU": {}, + "BF-OUB": {}, "BF-OUD": {}, "BF-PAS": {}, "BF-PON": {}, "BF-SEN": {}, + "BF-SIS": {}, "BF-SMT": {}, "BF-SNG": {}, "BF-SOM": {}, "BF-SOR": {}, + "BF-TAP": {}, "BF-TUI": {}, "BF-YAG": {}, "BF-YAT": {}, "BF-ZIR": {}, + "BF-ZON": {}, "BF-ZOU": {}, "BG-01": {}, "BG-02": {}, "BG-03": {}, + "BG-04": {}, "BG-05": {}, "BG-06": {}, "BG-07": {}, "BG-08": {}, + "BG-09": {}, "BG-10": {}, "BG-11": {}, "BG-12": {}, "BG-13": {}, + "BG-14": {}, "BG-15": {}, "BG-16": {}, "BG-17": {}, "BG-18": {}, + "BG-19": {}, "BG-20": {}, "BG-21": {}, "BG-22": {}, "BG-23": {}, + "BG-24": {}, "BG-25": {}, "BG-26": {}, "BG-27": {}, "BG-28": {}, + "BH-13": {}, "BH-14": {}, "BH-15": {}, "BH-16": {}, 
"BH-17": {}, + "BI-BB": {}, "BI-BL": {}, "BI-BM": {}, "BI-BR": {}, "BI-CA": {}, + "BI-CI": {}, "BI-GI": {}, "BI-KI": {}, "BI-KR": {}, "BI-KY": {}, + "BI-MA": {}, "BI-MU": {}, "BI-MW": {}, "BI-NG": {}, "BI-RM": {}, "BI-RT": {}, + "BI-RY": {}, "BJ-AK": {}, "BJ-AL": {}, "BJ-AQ": {}, "BJ-BO": {}, + "BJ-CO": {}, "BJ-DO": {}, "BJ-KO": {}, "BJ-LI": {}, "BJ-MO": {}, + "BJ-OU": {}, "BJ-PL": {}, "BJ-ZO": {}, "BN-BE": {}, "BN-BM": {}, + "BN-TE": {}, "BN-TU": {}, "BO-B": {}, "BO-C": {}, "BO-H": {}, + "BO-L": {}, "BO-N": {}, "BO-O": {}, "BO-P": {}, "BO-S": {}, + "BO-T": {}, "BQ-BO": {}, "BQ-SA": {}, "BQ-SE": {}, "BR-AC": {}, + "BR-AL": {}, "BR-AM": {}, "BR-AP": {}, "BR-BA": {}, "BR-CE": {}, + "BR-DF": {}, "BR-ES": {}, "BR-FN": {}, "BR-GO": {}, "BR-MA": {}, + "BR-MG": {}, "BR-MS": {}, "BR-MT": {}, "BR-PA": {}, "BR-PB": {}, + "BR-PE": {}, "BR-PI": {}, "BR-PR": {}, "BR-RJ": {}, "BR-RN": {}, + "BR-RO": {}, "BR-RR": {}, "BR-RS": {}, "BR-SC": {}, "BR-SE": {}, + "BR-SP": {}, "BR-TO": {}, "BS-AK": {}, "BS-BI": {}, "BS-BP": {}, + "BS-BY": {}, "BS-CE": {}, "BS-CI": {}, "BS-CK": {}, "BS-CO": {}, + "BS-CS": {}, "BS-EG": {}, "BS-EX": {}, "BS-FP": {}, "BS-GC": {}, + "BS-HI": {}, "BS-HT": {}, "BS-IN": {}, "BS-LI": {}, "BS-MC": {}, + "BS-MG": {}, "BS-MI": {}, "BS-NE": {}, "BS-NO": {}, "BS-NP": {}, "BS-NS": {}, + "BS-RC": {}, "BS-RI": {}, "BS-SA": {}, "BS-SE": {}, "BS-SO": {}, + "BS-SS": {}, "BS-SW": {}, "BS-WG": {}, "BT-11": {}, "BT-12": {}, + "BT-13": {}, "BT-14": {}, "BT-15": {}, "BT-21": {}, "BT-22": {}, + "BT-23": {}, "BT-24": {}, "BT-31": {}, "BT-32": {}, "BT-33": {}, + "BT-34": {}, "BT-41": {}, "BT-42": {}, "BT-43": {}, "BT-44": {}, + "BT-45": {}, "BT-GA": {}, "BT-TY": {}, "BW-CE": {}, "BW-CH": {}, "BW-GH": {}, + "BW-KG": {}, "BW-KL": {}, "BW-KW": {}, "BW-NE": {}, "BW-NW": {}, + "BW-SE": {}, "BW-SO": {}, "BY-BR": {}, "BY-HM": {}, "BY-HO": {}, + "BY-HR": {}, "BY-MA": {}, "BY-MI": {}, "BY-VI": {}, "BZ-BZ": {}, + "BZ-CY": {}, "BZ-CZL": {}, "BZ-OW": {}, "BZ-SC": {}, "BZ-TOL": {}, + "CA-AB": 
{}, "CA-BC": {}, "CA-MB": {}, "CA-NB": {}, "CA-NL": {}, + "CA-NS": {}, "CA-NT": {}, "CA-NU": {}, "CA-ON": {}, "CA-PE": {}, + "CA-QC": {}, "CA-SK": {}, "CA-YT": {}, "CD-BC": {}, "CD-BN": {}, + "CD-EQ": {}, "CD-HK": {}, "CD-IT": {}, "CD-KA": {}, "CD-KC": {}, "CD-KE": {}, "CD-KG": {}, "CD-KN": {}, + "CD-KW": {}, "CD-KS": {}, "CD-LU": {}, "CD-MA": {}, "CD-NK": {}, "CD-OR": {}, "CD-SA": {}, "CD-SK": {}, + "CD-TA": {}, "CD-TO": {}, "CF-AC": {}, "CF-BB": {}, "CF-BGF": {}, "CF-BK": {}, "CF-HK": {}, "CF-HM": {}, + "CF-HS": {}, "CF-KB": {}, "CF-KG": {}, "CF-LB": {}, "CF-MB": {}, + "CF-MP": {}, "CF-NM": {}, "CF-OP": {}, "CF-SE": {}, "CF-UK": {}, + "CF-VK": {}, "CG-11": {}, "CG-12": {}, "CG-13": {}, "CG-14": {}, + "CG-15": {}, "CG-16": {}, "CG-2": {}, "CG-5": {}, "CG-7": {}, "CG-8": {}, + "CG-9": {}, "CG-BZV": {}, "CH-AG": {}, "CH-AI": {}, "CH-AR": {}, + "CH-BE": {}, "CH-BL": {}, "CH-BS": {}, "CH-FR": {}, "CH-GE": {}, + "CH-GL": {}, "CH-GR": {}, "CH-JU": {}, "CH-LU": {}, "CH-NE": {}, + "CH-NW": {}, "CH-OW": {}, "CH-SG": {}, "CH-SH": {}, "CH-SO": {}, + "CH-SZ": {}, "CH-TG": {}, "CH-TI": {}, "CH-UR": {}, "CH-VD": {}, + "CH-VS": {}, "CH-ZG": {}, "CH-ZH": {}, "CI-AB": {}, "CI-BS": {}, + "CI-CM": {}, "CI-DN": {}, "CI-GD": {}, "CI-LC": {}, "CI-LG": {}, + "CI-MG": {}, "CI-SM": {}, "CI-SV": {}, "CI-VB": {}, "CI-WR": {}, + "CI-YM": {}, "CI-ZZ": {}, "CL-AI": {}, "CL-AN": {}, "CL-AP": {}, + "CL-AR": {}, "CL-AT": {}, "CL-BI": {}, "CL-CO": {}, "CL-LI": {}, + "CL-LL": {}, "CL-LR": {}, "CL-MA": {}, "CL-ML": {}, "CL-NB": {}, "CL-RM": {}, + "CL-TA": {}, "CL-VS": {}, "CM-AD": {}, "CM-CE": {}, "CM-EN": {}, + "CM-ES": {}, "CM-LT": {}, "CM-NO": {}, "CM-NW": {}, "CM-OU": {}, + "CM-SU": {}, "CM-SW": {}, "CN-AH": {}, "CN-BJ": {}, "CN-CQ": {}, + "CN-FJ": {}, "CN-GS": {}, "CN-GD": {}, "CN-GX": {}, "CN-GZ": {}, + "CN-HI": {}, "CN-HE": {}, "CN-HL": {}, "CN-HA": {}, "CN-HB": {}, + "CN-HN": {}, "CN-JS": {}, "CN-JX": {}, "CN-JL": {}, "CN-LN": {}, + "CN-NM": {}, "CN-NX": {}, "CN-QH": {}, "CN-SN": {}, 
"CN-SD": {}, "CN-SH": {}, + "CN-SX": {}, "CN-SC": {}, "CN-TJ": {}, "CN-XJ": {}, "CN-XZ": {}, "CN-YN": {}, + "CN-ZJ": {}, "CO-AMA": {}, "CO-ANT": {}, "CO-ARA": {}, "CO-ATL": {}, + "CO-BOL": {}, "CO-BOY": {}, "CO-CAL": {}, "CO-CAQ": {}, "CO-CAS": {}, + "CO-CAU": {}, "CO-CES": {}, "CO-CHO": {}, "CO-COR": {}, "CO-CUN": {}, + "CO-DC": {}, "CO-GUA": {}, "CO-GUV": {}, "CO-HUI": {}, "CO-LAG": {}, + "CO-MAG": {}, "CO-MET": {}, "CO-NAR": {}, "CO-NSA": {}, "CO-PUT": {}, + "CO-QUI": {}, "CO-RIS": {}, "CO-SAN": {}, "CO-SAP": {}, "CO-SUC": {}, + "CO-TOL": {}, "CO-VAC": {}, "CO-VAU": {}, "CO-VID": {}, "CR-A": {}, + "CR-C": {}, "CR-G": {}, "CR-H": {}, "CR-L": {}, "CR-P": {}, + "CR-SJ": {}, "CU-01": {}, "CU-02": {}, "CU-03": {}, "CU-04": {}, + "CU-05": {}, "CU-06": {}, "CU-07": {}, "CU-08": {}, "CU-09": {}, + "CU-10": {}, "CU-11": {}, "CU-12": {}, "CU-13": {}, "CU-14": {}, "CU-15": {}, + "CU-16": {}, "CU-99": {}, "CV-B": {}, "CV-BR": {}, "CV-BV": {}, "CV-CA": {}, + "CV-CF": {}, "CV-CR": {}, "CV-MA": {}, "CV-MO": {}, "CV-PA": {}, + "CV-PN": {}, "CV-PR": {}, "CV-RB": {}, "CV-RG": {}, "CV-RS": {}, + "CV-S": {}, "CV-SD": {}, "CV-SF": {}, "CV-SL": {}, "CV-SM": {}, + "CV-SO": {}, "CV-SS": {}, "CV-SV": {}, "CV-TA": {}, "CV-TS": {}, + "CY-01": {}, "CY-02": {}, "CY-03": {}, "CY-04": {}, "CY-05": {}, + "CY-06": {}, "CZ-10": {}, "CZ-101": {}, "CZ-102": {}, "CZ-103": {}, + "CZ-104": {}, "CZ-105": {}, "CZ-106": {}, "CZ-107": {}, "CZ-108": {}, + "CZ-109": {}, "CZ-110": {}, "CZ-111": {}, "CZ-112": {}, "CZ-113": {}, + "CZ-114": {}, "CZ-115": {}, "CZ-116": {}, "CZ-117": {}, "CZ-118": {}, + "CZ-119": {}, "CZ-120": {}, "CZ-121": {}, "CZ-122": {}, "CZ-20": {}, + "CZ-201": {}, "CZ-202": {}, "CZ-203": {}, "CZ-204": {}, "CZ-205": {}, + "CZ-206": {}, "CZ-207": {}, "CZ-208": {}, "CZ-209": {}, "CZ-20A": {}, + "CZ-20B": {}, "CZ-20C": {}, "CZ-31": {}, "CZ-311": {}, "CZ-312": {}, + "CZ-313": {}, "CZ-314": {}, "CZ-315": {}, "CZ-316": {}, "CZ-317": {}, + "CZ-32": {}, "CZ-321": {}, "CZ-322": {}, "CZ-323": {}, 
"CZ-324": {}, + "CZ-325": {}, "CZ-326": {}, "CZ-327": {}, "CZ-41": {}, "CZ-411": {}, + "CZ-412": {}, "CZ-413": {}, "CZ-42": {}, "CZ-421": {}, "CZ-422": {}, + "CZ-423": {}, "CZ-424": {}, "CZ-425": {}, "CZ-426": {}, "CZ-427": {}, + "CZ-51": {}, "CZ-511": {}, "CZ-512": {}, "CZ-513": {}, "CZ-514": {}, + "CZ-52": {}, "CZ-521": {}, "CZ-522": {}, "CZ-523": {}, "CZ-524": {}, + "CZ-525": {}, "CZ-53": {}, "CZ-531": {}, "CZ-532": {}, "CZ-533": {}, + "CZ-534": {}, "CZ-63": {}, "CZ-631": {}, "CZ-632": {}, "CZ-633": {}, + "CZ-634": {}, "CZ-635": {}, "CZ-64": {}, "CZ-641": {}, "CZ-642": {}, + "CZ-643": {}, "CZ-644": {}, "CZ-645": {}, "CZ-646": {}, "CZ-647": {}, + "CZ-71": {}, "CZ-711": {}, "CZ-712": {}, "CZ-713": {}, "CZ-714": {}, + "CZ-715": {}, "CZ-72": {}, "CZ-721": {}, "CZ-722": {}, "CZ-723": {}, + "CZ-724": {}, "CZ-80": {}, "CZ-801": {}, "CZ-802": {}, "CZ-803": {}, + "CZ-804": {}, "CZ-805": {}, "CZ-806": {}, "DE-BB": {}, "DE-BE": {}, + "DE-BW": {}, "DE-BY": {}, "DE-HB": {}, "DE-HE": {}, "DE-HH": {}, + "DE-MV": {}, "DE-NI": {}, "DE-NW": {}, "DE-RP": {}, "DE-SH": {}, + "DE-SL": {}, "DE-SN": {}, "DE-ST": {}, "DE-TH": {}, "DJ-AR": {}, + "DJ-AS": {}, "DJ-DI": {}, "DJ-DJ": {}, "DJ-OB": {}, "DJ-TA": {}, + "DK-81": {}, "DK-82": {}, "DK-83": {}, "DK-84": {}, "DK-85": {}, + "DM-01": {}, "DM-02": {}, "DM-03": {}, "DM-04": {}, "DM-05": {}, + "DM-06": {}, "DM-07": {}, "DM-08": {}, "DM-09": {}, "DM-10": {}, + "DO-01": {}, "DO-02": {}, "DO-03": {}, "DO-04": {}, "DO-05": {}, + "DO-06": {}, "DO-07": {}, "DO-08": {}, "DO-09": {}, "DO-10": {}, + "DO-11": {}, "DO-12": {}, "DO-13": {}, "DO-14": {}, "DO-15": {}, + "DO-16": {}, "DO-17": {}, "DO-18": {}, "DO-19": {}, "DO-20": {}, + "DO-21": {}, "DO-22": {}, "DO-23": {}, "DO-24": {}, "DO-25": {}, + "DO-26": {}, "DO-27": {}, "DO-28": {}, "DO-29": {}, "DO-30": {}, "DO-31": {}, + "DZ-01": {}, "DZ-02": {}, "DZ-03": {}, "DZ-04": {}, "DZ-05": {}, + "DZ-06": {}, "DZ-07": {}, "DZ-08": {}, "DZ-09": {}, "DZ-10": {}, + "DZ-11": {}, "DZ-12": {}, "DZ-13": {}, 
"DZ-14": {}, "DZ-15": {}, + "DZ-16": {}, "DZ-17": {}, "DZ-18": {}, "DZ-19": {}, "DZ-20": {}, + "DZ-21": {}, "DZ-22": {}, "DZ-23": {}, "DZ-24": {}, "DZ-25": {}, + "DZ-26": {}, "DZ-27": {}, "DZ-28": {}, "DZ-29": {}, "DZ-30": {}, + "DZ-31": {}, "DZ-32": {}, "DZ-33": {}, "DZ-34": {}, "DZ-35": {}, + "DZ-36": {}, "DZ-37": {}, "DZ-38": {}, "DZ-39": {}, "DZ-40": {}, + "DZ-41": {}, "DZ-42": {}, "DZ-43": {}, "DZ-44": {}, "DZ-45": {}, + "DZ-46": {}, "DZ-47": {}, "DZ-48": {}, "DZ-49": {}, "DZ-51": {}, + "DZ-53": {}, "DZ-55": {}, "DZ-56": {}, "DZ-57": {}, "EC-A": {}, "EC-B": {}, + "EC-C": {}, "EC-D": {}, "EC-E": {}, "EC-F": {}, "EC-G": {}, + "EC-H": {}, "EC-I": {}, "EC-L": {}, "EC-M": {}, "EC-N": {}, + "EC-O": {}, "EC-P": {}, "EC-R": {}, "EC-S": {}, "EC-SD": {}, + "EC-SE": {}, "EC-T": {}, "EC-U": {}, "EC-W": {}, "EC-X": {}, + "EC-Y": {}, "EC-Z": {}, "EE-37": {}, "EE-39": {}, "EE-44": {}, "EE-45": {}, + "EE-49": {}, "EE-50": {}, "EE-51": {}, "EE-52": {}, "EE-56": {}, "EE-57": {}, + "EE-59": {}, "EE-60": {}, "EE-64": {}, "EE-65": {}, "EE-67": {}, "EE-68": {}, + "EE-70": {}, "EE-71": {}, "EE-74": {}, "EE-78": {}, "EE-79": {}, "EE-81": {}, "EE-82": {}, + "EE-84": {}, "EE-86": {}, "EE-87": {}, "EG-ALX": {}, "EG-ASN": {}, "EG-AST": {}, + "EG-BA": {}, "EG-BH": {}, "EG-BNS": {}, "EG-C": {}, "EG-DK": {}, + "EG-DT": {}, "EG-FYM": {}, "EG-GH": {}, "EG-GZ": {}, "EG-HU": {}, + "EG-IS": {}, "EG-JS": {}, "EG-KB": {}, "EG-KFS": {}, "EG-KN": {}, + "EG-LX": {}, "EG-MN": {}, "EG-MNF": {}, "EG-MT": {}, "EG-PTS": {}, "EG-SHG": {}, + "EG-SHR": {}, "EG-SIN": {}, "EG-SU": {}, "EG-SUZ": {}, "EG-WAD": {}, + "ER-AN": {}, "ER-DK": {}, "ER-DU": {}, "ER-GB": {}, "ER-MA": {}, + "ER-SK": {}, "ES-A": {}, "ES-AB": {}, "ES-AL": {}, "ES-AN": {}, + "ES-AR": {}, "ES-AS": {}, "ES-AV": {}, "ES-B": {}, "ES-BA": {}, + "ES-BI": {}, "ES-BU": {}, "ES-C": {}, "ES-CA": {}, "ES-CB": {}, + "ES-CC": {}, "ES-CE": {}, "ES-CL": {}, "ES-CM": {}, "ES-CN": {}, + "ES-CO": {}, "ES-CR": {}, "ES-CS": {}, "ES-CT": {}, "ES-CU": {}, + 
"ES-EX": {}, "ES-GA": {}, "ES-GC": {}, "ES-GI": {}, "ES-GR": {}, + "ES-GU": {}, "ES-H": {}, "ES-HU": {}, "ES-IB": {}, "ES-J": {}, + "ES-L": {}, "ES-LE": {}, "ES-LO": {}, "ES-LU": {}, "ES-M": {}, + "ES-MA": {}, "ES-MC": {}, "ES-MD": {}, "ES-ML": {}, "ES-MU": {}, + "ES-NA": {}, "ES-NC": {}, "ES-O": {}, "ES-OR": {}, "ES-P": {}, + "ES-PM": {}, "ES-PO": {}, "ES-PV": {}, "ES-RI": {}, "ES-S": {}, + "ES-SA": {}, "ES-SE": {}, "ES-SG": {}, "ES-SO": {}, "ES-SS": {}, + "ES-T": {}, "ES-TE": {}, "ES-TF": {}, "ES-TO": {}, "ES-V": {}, + "ES-VA": {}, "ES-VC": {}, "ES-VI": {}, "ES-Z": {}, "ES-ZA": {}, + "ET-AA": {}, "ET-AF": {}, "ET-AM": {}, "ET-BE": {}, "ET-DD": {}, + "ET-GA": {}, "ET-HA": {}, "ET-OR": {}, "ET-SN": {}, "ET-SO": {}, + "ET-TI": {}, "FI-01": {}, "FI-02": {}, "FI-03": {}, "FI-04": {}, + "FI-05": {}, "FI-06": {}, "FI-07": {}, "FI-08": {}, "FI-09": {}, + "FI-10": {}, "FI-11": {}, "FI-12": {}, "FI-13": {}, "FI-14": {}, + "FI-15": {}, "FI-16": {}, "FI-17": {}, "FI-18": {}, "FI-19": {}, + "FJ-C": {}, "FJ-E": {}, "FJ-N": {}, "FJ-R": {}, "FJ-W": {}, + "FM-KSA": {}, "FM-PNI": {}, "FM-TRK": {}, "FM-YAP": {}, "FR-01": {}, + "FR-02": {}, "FR-03": {}, "FR-04": {}, "FR-05": {}, "FR-06": {}, + "FR-07": {}, "FR-08": {}, "FR-09": {}, "FR-10": {}, "FR-11": {}, + "FR-12": {}, "FR-13": {}, "FR-14": {}, "FR-15": {}, "FR-16": {}, + "FR-17": {}, "FR-18": {}, "FR-19": {}, "FR-20R": {}, "FR-21": {}, "FR-22": {}, + "FR-23": {}, "FR-24": {}, "FR-25": {}, "FR-26": {}, "FR-27": {}, + "FR-28": {}, "FR-29": {}, "FR-2A": {}, "FR-2B": {}, "FR-30": {}, + "FR-31": {}, "FR-32": {}, "FR-33": {}, "FR-34": {}, "FR-35": {}, + "FR-36": {}, "FR-37": {}, "FR-38": {}, "FR-39": {}, "FR-40": {}, + "FR-41": {}, "FR-42": {}, "FR-43": {}, "FR-44": {}, "FR-45": {}, + "FR-46": {}, "FR-47": {}, "FR-48": {}, "FR-49": {}, "FR-50": {}, + "FR-51": {}, "FR-52": {}, "FR-53": {}, "FR-54": {}, "FR-55": {}, + "FR-56": {}, "FR-57": {}, "FR-58": {}, "FR-59": {}, "FR-60": {}, + "FR-61": {}, "FR-62": {}, "FR-63": {}, "FR-64": {}, 
"FR-65": {}, + "FR-66": {}, "FR-67": {}, "FR-68": {}, "FR-69": {}, "FR-70": {}, + "FR-71": {}, "FR-72": {}, "FR-73": {}, "FR-74": {}, "FR-75": {}, + "FR-76": {}, "FR-77": {}, "FR-78": {}, "FR-79": {}, "FR-80": {}, + "FR-81": {}, "FR-82": {}, "FR-83": {}, "FR-84": {}, "FR-85": {}, + "FR-86": {}, "FR-87": {}, "FR-88": {}, "FR-89": {}, "FR-90": {}, + "FR-91": {}, "FR-92": {}, "FR-93": {}, "FR-94": {}, "FR-95": {}, + "FR-ARA": {}, "FR-BFC": {}, "FR-BL": {}, "FR-BRE": {}, "FR-COR": {}, + "FR-CP": {}, "FR-CVL": {}, "FR-GES": {}, "FR-GF": {}, "FR-GP": {}, + "FR-GUA": {}, "FR-HDF": {}, "FR-IDF": {}, "FR-LRE": {}, "FR-MAY": {}, + "FR-MF": {}, "FR-MQ": {}, "FR-NAQ": {}, "FR-NC": {}, "FR-NOR": {}, + "FR-OCC": {}, "FR-PAC": {}, "FR-PDL": {}, "FR-PF": {}, "FR-PM": {}, + "FR-RE": {}, "FR-TF": {}, "FR-WF": {}, "FR-YT": {}, "GA-1": {}, + "GA-2": {}, "GA-3": {}, "GA-4": {}, "GA-5": {}, "GA-6": {}, + "GA-7": {}, "GA-8": {}, "GA-9": {}, "GB-ABC": {}, "GB-ABD": {}, + "GB-ABE": {}, "GB-AGB": {}, "GB-AGY": {}, "GB-AND": {}, "GB-ANN": {}, + "GB-ANS": {}, "GB-BAS": {}, "GB-BBD": {}, "GB-BDF": {}, "GB-BDG": {}, + "GB-BEN": {}, "GB-BEX": {}, "GB-BFS": {}, "GB-BGE": {}, "GB-BGW": {}, + "GB-BIR": {}, "GB-BKM": {}, "GB-BMH": {}, "GB-BNE": {}, "GB-BNH": {}, + "GB-BNS": {}, "GB-BOL": {}, "GB-BPL": {}, "GB-BRC": {}, "GB-BRD": {}, + "GB-BRY": {}, "GB-BST": {}, "GB-BUR": {}, "GB-CAM": {}, "GB-CAY": {}, + "GB-CBF": {}, "GB-CCG": {}, "GB-CGN": {}, "GB-CHE": {}, "GB-CHW": {}, + "GB-CLD": {}, "GB-CLK": {}, "GB-CMA": {}, "GB-CMD": {}, "GB-CMN": {}, + "GB-CON": {}, "GB-COV": {}, "GB-CRF": {}, "GB-CRY": {}, "GB-CWY": {}, + "GB-DAL": {}, "GB-DBY": {}, "GB-DEN": {}, "GB-DER": {}, "GB-DEV": {}, + "GB-DGY": {}, "GB-DNC": {}, "GB-DND": {}, "GB-DOR": {}, "GB-DRS": {}, + "GB-DUD": {}, "GB-DUR": {}, "GB-EAL": {}, "GB-EAW": {}, "GB-EAY": {}, + "GB-EDH": {}, "GB-EDU": {}, "GB-ELN": {}, "GB-ELS": {}, "GB-ENF": {}, + "GB-ENG": {}, "GB-ERW": {}, "GB-ERY": {}, "GB-ESS": {}, "GB-ESX": {}, + "GB-FAL": {}, "GB-FIF": {}, 
"GB-FLN": {}, "GB-FMO": {}, "GB-GAT": {}, + "GB-GBN": {}, "GB-GLG": {}, "GB-GLS": {}, "GB-GRE": {}, "GB-GWN": {}, + "GB-HAL": {}, "GB-HAM": {}, "GB-HAV": {}, "GB-HCK": {}, "GB-HEF": {}, + "GB-HIL": {}, "GB-HLD": {}, "GB-HMF": {}, "GB-HNS": {}, "GB-HPL": {}, + "GB-HRT": {}, "GB-HRW": {}, "GB-HRY": {}, "GB-IOS": {}, "GB-IOW": {}, + "GB-ISL": {}, "GB-IVC": {}, "GB-KEC": {}, "GB-KEN": {}, "GB-KHL": {}, + "GB-KIR": {}, "GB-KTT": {}, "GB-KWL": {}, "GB-LAN": {}, "GB-LBC": {}, + "GB-LBH": {}, "GB-LCE": {}, "GB-LDS": {}, "GB-LEC": {}, "GB-LEW": {}, + "GB-LIN": {}, "GB-LIV": {}, "GB-LND": {}, "GB-LUT": {}, "GB-MAN": {}, + "GB-MDB": {}, "GB-MDW": {}, "GB-MEA": {}, "GB-MIK": {}, "GD-01": {}, + "GB-MLN": {}, "GB-MON": {}, "GB-MRT": {}, "GB-MRY": {}, "GB-MTY": {}, + "GB-MUL": {}, "GB-NAY": {}, "GB-NBL": {}, "GB-NEL": {}, "GB-NET": {}, + "GB-NFK": {}, "GB-NGM": {}, "GB-NIR": {}, "GB-NLK": {}, "GB-NLN": {}, + "GB-NMD": {}, "GB-NSM": {}, "GB-NTH": {}, "GB-NTL": {}, "GB-NTT": {}, + "GB-NTY": {}, "GB-NWM": {}, "GB-NWP": {}, "GB-NYK": {}, "GB-OLD": {}, + "GB-ORK": {}, "GB-OXF": {}, "GB-PEM": {}, "GB-PKN": {}, "GB-PLY": {}, + "GB-POL": {}, "GB-POR": {}, "GB-POW": {}, "GB-PTE": {}, "GB-RCC": {}, + "GB-RCH": {}, "GB-RCT": {}, "GB-RDB": {}, "GB-RDG": {}, "GB-RFW": {}, + "GB-RIC": {}, "GB-ROT": {}, "GB-RUT": {}, "GB-SAW": {}, "GB-SAY": {}, + "GB-SCB": {}, "GB-SCT": {}, "GB-SFK": {}, "GB-SFT": {}, "GB-SGC": {}, + "GB-SHF": {}, "GB-SHN": {}, "GB-SHR": {}, "GB-SKP": {}, "GB-SLF": {}, + "GB-SLG": {}, "GB-SLK": {}, "GB-SND": {}, "GB-SOL": {}, "GB-SOM": {}, + "GB-SOS": {}, "GB-SRY": {}, "GB-STE": {}, "GB-STG": {}, "GB-STH": {}, + "GB-STN": {}, "GB-STS": {}, "GB-STT": {}, "GB-STY": {}, "GB-SWA": {}, + "GB-SWD": {}, "GB-SWK": {}, "GB-TAM": {}, "GB-TFW": {}, "GB-THR": {}, + "GB-TOB": {}, "GB-TOF": {}, "GB-TRF": {}, "GB-TWH": {}, "GB-UKM": {}, + "GB-VGL": {}, "GB-WAR": {}, "GB-WBK": {}, "GB-WDU": {}, "GB-WFT": {}, + "GB-WGN": {}, "GB-WIL": {}, "GB-WKF": {}, "GB-WLL": {}, "GB-WLN": {}, + "GB-WLS": 
{}, "GB-WLV": {}, "GB-WND": {}, "GB-WNM": {}, "GB-WOK": {}, + "GB-WOR": {}, "GB-WRL": {}, "GB-WRT": {}, "GB-WRX": {}, "GB-WSM": {}, + "GB-WSX": {}, "GB-YOR": {}, "GB-ZET": {}, "GD-02": {}, "GD-03": {}, + "GD-04": {}, "GD-05": {}, "GD-06": {}, "GD-10": {}, "GE-AB": {}, + "GE-AJ": {}, "GE-GU": {}, "GE-IM": {}, "GE-KA": {}, "GE-KK": {}, + "GE-MM": {}, "GE-RL": {}, "GE-SJ": {}, "GE-SK": {}, "GE-SZ": {}, + "GE-TB": {}, "GH-AA": {}, "GH-AH": {}, "GH-AF": {}, "GH-BA": {}, "GH-BO": {}, "GH-BE": {}, "GH-CP": {}, + "GH-EP": {}, "GH-NP": {}, "GH-TV": {}, "GH-UE": {}, "GH-UW": {}, + "GH-WP": {}, "GL-AV": {}, "GL-KU": {}, "GL-QA": {}, "GL-QT": {}, "GL-QE": {}, "GL-SM": {}, + "GM-B": {}, "GM-L": {}, "GM-M": {}, "GM-N": {}, "GM-U": {}, + "GM-W": {}, "GN-B": {}, "GN-BE": {}, "GN-BF": {}, "GN-BK": {}, + "GN-C": {}, "GN-CO": {}, "GN-D": {}, "GN-DB": {}, "GN-DI": {}, + "GN-DL": {}, "GN-DU": {}, "GN-F": {}, "GN-FA": {}, "GN-FO": {}, + "GN-FR": {}, "GN-GA": {}, "GN-GU": {}, "GN-K": {}, "GN-KA": {}, + "GN-KB": {}, "GN-KD": {}, "GN-KE": {}, "GN-KN": {}, "GN-KO": {}, + "GN-KS": {}, "GN-L": {}, "GN-LA": {}, "GN-LE": {}, "GN-LO": {}, + "GN-M": {}, "GN-MC": {}, "GN-MD": {}, "GN-ML": {}, "GN-MM": {}, + "GN-N": {}, "GN-NZ": {}, "GN-PI": {}, "GN-SI": {}, "GN-TE": {}, + "GN-TO": {}, "GN-YO": {}, "GQ-AN": {}, "GQ-BN": {}, "GQ-BS": {}, + "GQ-C": {}, "GQ-CS": {}, "GQ-I": {}, "GQ-KN": {}, "GQ-LI": {}, + "GQ-WN": {}, "GR-01": {}, "GR-03": {}, "GR-04": {}, "GR-05": {}, + "GR-06": {}, "GR-07": {}, "GR-11": {}, "GR-12": {}, "GR-13": {}, + "GR-14": {}, "GR-15": {}, "GR-16": {}, "GR-17": {}, "GR-21": {}, + "GR-22": {}, "GR-23": {}, "GR-24": {}, "GR-31": {}, "GR-32": {}, + "GR-33": {}, "GR-34": {}, "GR-41": {}, "GR-42": {}, "GR-43": {}, + "GR-44": {}, "GR-51": {}, "GR-52": {}, "GR-53": {}, "GR-54": {}, + "GR-55": {}, "GR-56": {}, "GR-57": {}, "GR-58": {}, "GR-59": {}, + "GR-61": {}, "GR-62": {}, "GR-63": {}, "GR-64": {}, "GR-69": {}, + "GR-71": {}, "GR-72": {}, "GR-73": {}, "GR-81": {}, "GR-82": {}, + 
"GR-83": {}, "GR-84": {}, "GR-85": {}, "GR-91": {}, "GR-92": {}, + "GR-93": {}, "GR-94": {}, "GR-A": {}, "GR-A1": {}, "GR-B": {}, + "GR-C": {}, "GR-D": {}, "GR-E": {}, "GR-F": {}, "GR-G": {}, + "GR-H": {}, "GR-I": {}, "GR-J": {}, "GR-K": {}, "GR-L": {}, + "GR-M": {}, "GT-01": {}, "GT-02": {}, "GT-03": {}, "GT-04": {}, + "GT-05": {}, "GT-06": {}, "GT-07": {}, "GT-08": {}, "GT-09": {}, + "GT-10": {}, "GT-11": {}, "GT-12": {}, "GT-13": {}, "GT-14": {}, + "GT-15": {}, "GT-16": {}, "GT-17": {}, "GT-18": {}, "GT-19": {}, + "GT-20": {}, "GT-21": {}, "GT-22": {}, "GW-BA": {}, "GW-BL": {}, + "GW-BM": {}, "GW-BS": {}, "GW-CA": {}, "GW-GA": {}, "GW-L": {}, + "GW-N": {}, "GW-OI": {}, "GW-QU": {}, "GW-S": {}, "GW-TO": {}, + "GY-BA": {}, "GY-CU": {}, "GY-DE": {}, "GY-EB": {}, "GY-ES": {}, + "GY-MA": {}, "GY-PM": {}, "GY-PT": {}, "GY-UD": {}, "GY-UT": {}, + "HN-AT": {}, "HN-CH": {}, "HN-CL": {}, "HN-CM": {}, "HN-CP": {}, + "HN-CR": {}, "HN-EP": {}, "HN-FM": {}, "HN-GD": {}, "HN-IB": {}, + "HN-IN": {}, "HN-LE": {}, "HN-LP": {}, "HN-OC": {}, "HN-OL": {}, + "HN-SB": {}, "HN-VA": {}, "HN-YO": {}, "HR-01": {}, "HR-02": {}, + "HR-03": {}, "HR-04": {}, "HR-05": {}, "HR-06": {}, "HR-07": {}, + "HR-08": {}, "HR-09": {}, "HR-10": {}, "HR-11": {}, "HR-12": {}, + "HR-13": {}, "HR-14": {}, "HR-15": {}, "HR-16": {}, "HR-17": {}, + "HR-18": {}, "HR-19": {}, "HR-20": {}, "HR-21": {}, "HT-AR": {}, + "HT-CE": {}, "HT-GA": {}, "HT-ND": {}, "HT-NE": {}, "HT-NO": {}, "HT-NI": {}, + "HT-OU": {}, "HT-SD": {}, "HT-SE": {}, "HU-BA": {}, "HU-BC": {}, + "HU-BE": {}, "HU-BK": {}, "HU-BU": {}, "HU-BZ": {}, "HU-CS": {}, + "HU-DE": {}, "HU-DU": {}, "HU-EG": {}, "HU-ER": {}, "HU-FE": {}, + "HU-GS": {}, "HU-GY": {}, "HU-HB": {}, "HU-HE": {}, "HU-HV": {}, + "HU-JN": {}, "HU-KE": {}, "HU-KM": {}, "HU-KV": {}, "HU-MI": {}, + "HU-NK": {}, "HU-NO": {}, "HU-NY": {}, "HU-PE": {}, "HU-PS": {}, + "HU-SD": {}, "HU-SF": {}, "HU-SH": {}, "HU-SK": {}, "HU-SN": {}, + "HU-SO": {}, "HU-SS": {}, "HU-ST": {}, "HU-SZ": {}, 
"HU-TB": {}, + "HU-TO": {}, "HU-VA": {}, "HU-VE": {}, "HU-VM": {}, "HU-ZA": {}, + "HU-ZE": {}, "ID-AC": {}, "ID-BA": {}, "ID-BB": {}, "ID-BE": {}, + "ID-BT": {}, "ID-GO": {}, "ID-IJ": {}, "ID-JA": {}, "ID-JB": {}, + "ID-JI": {}, "ID-JK": {}, "ID-JT": {}, "ID-JW": {}, "ID-KA": {}, + "ID-KB": {}, "ID-KI": {}, "ID-KU": {}, "ID-KR": {}, "ID-KS": {}, + "ID-KT": {}, "ID-LA": {}, "ID-MA": {}, "ID-ML": {}, "ID-MU": {}, + "ID-NB": {}, "ID-NT": {}, "ID-NU": {}, "ID-PA": {}, "ID-PB": {}, + "ID-PE": {}, "ID-PP": {}, "ID-PS": {}, "ID-PT": {}, "ID-RI": {}, + "ID-SA": {}, "ID-SB": {}, "ID-SG": {}, "ID-SL": {}, "ID-SM": {}, + "ID-SN": {}, "ID-SR": {}, "ID-SS": {}, "ID-ST": {}, "ID-SU": {}, + "ID-YO": {}, "IE-C": {}, "IE-CE": {}, "IE-CN": {}, "IE-CO": {}, + "IE-CW": {}, "IE-D": {}, "IE-DL": {}, "IE-G": {}, "IE-KE": {}, + "IE-KK": {}, "IE-KY": {}, "IE-L": {}, "IE-LD": {}, "IE-LH": {}, + "IE-LK": {}, "IE-LM": {}, "IE-LS": {}, "IE-M": {}, "IE-MH": {}, + "IE-MN": {}, "IE-MO": {}, "IE-OY": {}, "IE-RN": {}, "IE-SO": {}, + "IE-TA": {}, "IE-U": {}, "IE-WD": {}, "IE-WH": {}, "IE-WW": {}, + "IE-WX": {}, "IL-D": {}, "IL-HA": {}, "IL-JM": {}, "IL-M": {}, + "IL-TA": {}, "IL-Z": {}, "IN-AN": {}, "IN-AP": {}, "IN-AR": {}, + "IN-AS": {}, "IN-BR": {}, "IN-CH": {}, "IN-CT": {}, "IN-DH": {}, + "IN-DL": {}, "IN-DN": {}, "IN-GA": {}, "IN-GJ": {}, "IN-HP": {}, + "IN-HR": {}, "IN-JH": {}, "IN-JK": {}, "IN-KA": {}, "IN-KL": {}, + "IN-LD": {}, "IN-MH": {}, "IN-ML": {}, "IN-MN": {}, "IN-MP": {}, + "IN-MZ": {}, "IN-NL": {}, "IN-TG": {}, "IN-OR": {}, "IN-PB": {}, "IN-PY": {}, + "IN-RJ": {}, "IN-SK": {}, "IN-TN": {}, "IN-TR": {}, "IN-UP": {}, + "IN-UT": {}, "IN-WB": {}, "IQ-AN": {}, "IQ-AR": {}, "IQ-BA": {}, + "IQ-BB": {}, "IQ-BG": {}, "IQ-DA": {}, "IQ-DI": {}, "IQ-DQ": {}, + "IQ-KA": {}, "IQ-KI": {}, "IQ-MA": {}, "IQ-MU": {}, "IQ-NA": {}, "IQ-NI": {}, + "IQ-QA": {}, "IQ-SD": {}, "IQ-SW": {}, "IQ-SU": {}, "IQ-TS": {}, "IQ-WA": {}, + "IR-00": {}, "IR-01": {}, "IR-02": {}, "IR-03": {}, "IR-04": {}, "IR-05": {}, 
+ "IR-06": {}, "IR-07": {}, "IR-08": {}, "IR-09": {}, "IR-10": {}, "IR-11": {}, + "IR-12": {}, "IR-13": {}, "IR-14": {}, "IR-15": {}, "IR-16": {}, + "IR-17": {}, "IR-18": {}, "IR-19": {}, "IR-20": {}, "IR-21": {}, + "IR-22": {}, "IR-23": {}, "IR-24": {}, "IR-25": {}, "IR-26": {}, + "IR-27": {}, "IR-28": {}, "IR-29": {}, "IR-30": {}, "IR-31": {}, + "IS-0": {}, "IS-1": {}, "IS-2": {}, "IS-3": {}, "IS-4": {}, + "IS-5": {}, "IS-6": {}, "IS-7": {}, "IS-8": {}, "IT-21": {}, + "IT-23": {}, "IT-25": {}, "IT-32": {}, "IT-34": {}, "IT-36": {}, + "IT-42": {}, "IT-45": {}, "IT-52": {}, "IT-55": {}, "IT-57": {}, + "IT-62": {}, "IT-65": {}, "IT-67": {}, "IT-72": {}, "IT-75": {}, + "IT-77": {}, "IT-78": {}, "IT-82": {}, "IT-88": {}, "IT-AG": {}, + "IT-AL": {}, "IT-AN": {}, "IT-AO": {}, "IT-AP": {}, "IT-AQ": {}, + "IT-AR": {}, "IT-AT": {}, "IT-AV": {}, "IT-BA": {}, "IT-BG": {}, + "IT-BI": {}, "IT-BL": {}, "IT-BN": {}, "IT-BO": {}, "IT-BR": {}, + "IT-BS": {}, "IT-BT": {}, "IT-BZ": {}, "IT-CA": {}, "IT-CB": {}, + "IT-CE": {}, "IT-CH": {}, "IT-CI": {}, "IT-CL": {}, "IT-CN": {}, + "IT-CO": {}, "IT-CR": {}, "IT-CS": {}, "IT-CT": {}, "IT-CZ": {}, + "IT-EN": {}, "IT-FC": {}, "IT-FE": {}, "IT-FG": {}, "IT-FI": {}, + "IT-FM": {}, "IT-FR": {}, "IT-GE": {}, "IT-GO": {}, "IT-GR": {}, + "IT-IM": {}, "IT-IS": {}, "IT-KR": {}, "IT-LC": {}, "IT-LE": {}, + "IT-LI": {}, "IT-LO": {}, "IT-LT": {}, "IT-LU": {}, "IT-MB": {}, + "IT-MC": {}, "IT-ME": {}, "IT-MI": {}, "IT-MN": {}, "IT-MO": {}, + "IT-MS": {}, "IT-MT": {}, "IT-NA": {}, "IT-NO": {}, "IT-NU": {}, + "IT-OG": {}, "IT-OR": {}, "IT-OT": {}, "IT-PA": {}, "IT-PC": {}, + "IT-PD": {}, "IT-PE": {}, "IT-PG": {}, "IT-PI": {}, "IT-PN": {}, + "IT-PO": {}, "IT-PR": {}, "IT-PT": {}, "IT-PU": {}, "IT-PV": {}, + "IT-PZ": {}, "IT-RA": {}, "IT-RC": {}, "IT-RE": {}, "IT-RG": {}, + "IT-RI": {}, "IT-RM": {}, "IT-RN": {}, "IT-RO": {}, "IT-SA": {}, + "IT-SI": {}, "IT-SO": {}, "IT-SP": {}, "IT-SR": {}, "IT-SS": {}, + "IT-SV": {}, "IT-TA": {}, "IT-TE": {}, "IT-TN": 
{}, "IT-TO": {}, + "IT-TP": {}, "IT-TR": {}, "IT-TS": {}, "IT-TV": {}, "IT-UD": {}, + "IT-VA": {}, "IT-VB": {}, "IT-VC": {}, "IT-VE": {}, "IT-VI": {}, + "IT-VR": {}, "IT-VS": {}, "IT-VT": {}, "IT-VV": {}, "JM-01": {}, + "JM-02": {}, "JM-03": {}, "JM-04": {}, "JM-05": {}, "JM-06": {}, + "JM-07": {}, "JM-08": {}, "JM-09": {}, "JM-10": {}, "JM-11": {}, + "JM-12": {}, "JM-13": {}, "JM-14": {}, "JO-AJ": {}, "JO-AM": {}, + "JO-AQ": {}, "JO-AT": {}, "JO-AZ": {}, "JO-BA": {}, "JO-IR": {}, + "JO-JA": {}, "JO-KA": {}, "JO-MA": {}, "JO-MD": {}, "JO-MN": {}, + "JP-01": {}, "JP-02": {}, "JP-03": {}, "JP-04": {}, "JP-05": {}, + "JP-06": {}, "JP-07": {}, "JP-08": {}, "JP-09": {}, "JP-10": {}, + "JP-11": {}, "JP-12": {}, "JP-13": {}, "JP-14": {}, "JP-15": {}, + "JP-16": {}, "JP-17": {}, "JP-18": {}, "JP-19": {}, "JP-20": {}, + "JP-21": {}, "JP-22": {}, "JP-23": {}, "JP-24": {}, "JP-25": {}, + "JP-26": {}, "JP-27": {}, "JP-28": {}, "JP-29": {}, "JP-30": {}, + "JP-31": {}, "JP-32": {}, "JP-33": {}, "JP-34": {}, "JP-35": {}, + "JP-36": {}, "JP-37": {}, "JP-38": {}, "JP-39": {}, "JP-40": {}, + "JP-41": {}, "JP-42": {}, "JP-43": {}, "JP-44": {}, "JP-45": {}, + "JP-46": {}, "JP-47": {}, "KE-01": {}, "KE-02": {}, "KE-03": {}, + "KE-04": {}, "KE-05": {}, "KE-06": {}, "KE-07": {}, "KE-08": {}, + "KE-09": {}, "KE-10": {}, "KE-11": {}, "KE-12": {}, "KE-13": {}, + "KE-14": {}, "KE-15": {}, "KE-16": {}, "KE-17": {}, "KE-18": {}, + "KE-19": {}, "KE-20": {}, "KE-21": {}, "KE-22": {}, "KE-23": {}, + "KE-24": {}, "KE-25": {}, "KE-26": {}, "KE-27": {}, "KE-28": {}, + "KE-29": {}, "KE-30": {}, "KE-31": {}, "KE-32": {}, "KE-33": {}, + "KE-34": {}, "KE-35": {}, "KE-36": {}, "KE-37": {}, "KE-38": {}, + "KE-39": {}, "KE-40": {}, "KE-41": {}, "KE-42": {}, "KE-43": {}, + "KE-44": {}, "KE-45": {}, "KE-46": {}, "KE-47": {}, "KG-B": {}, + "KG-C": {}, "KG-GB": {}, "KG-GO": {}, "KG-J": {}, "KG-N": {}, "KG-O": {}, + "KG-T": {}, "KG-Y": {}, "KH-1": {}, "KH-10": {}, "KH-11": {}, + "KH-12": {}, "KH-13": {}, 
"KH-14": {}, "KH-15": {}, "KH-16": {}, + "KH-17": {}, "KH-18": {}, "KH-19": {}, "KH-2": {}, "KH-20": {}, + "KH-21": {}, "KH-22": {}, "KH-23": {}, "KH-24": {}, "KH-3": {}, + "KH-4": {}, "KH-5": {}, "KH-6": {}, "KH-7": {}, "KH-8": {}, + "KH-9": {}, "KI-G": {}, "KI-L": {}, "KI-P": {}, "KM-A": {}, + "KM-G": {}, "KM-M": {}, "KN-01": {}, "KN-02": {}, "KN-03": {}, + "KN-04": {}, "KN-05": {}, "KN-06": {}, "KN-07": {}, "KN-08": {}, + "KN-09": {}, "KN-10": {}, "KN-11": {}, "KN-12": {}, "KN-13": {}, + "KN-15": {}, "KN-K": {}, "KN-N": {}, "KP-01": {}, "KP-02": {}, + "KP-03": {}, "KP-04": {}, "KP-05": {}, "KP-06": {}, "KP-07": {}, + "KP-08": {}, "KP-09": {}, "KP-10": {}, "KP-13": {}, "KR-11": {}, + "KR-26": {}, "KR-27": {}, "KR-28": {}, "KR-29": {}, "KR-30": {}, + "KR-31": {}, "KR-41": {}, "KR-42": {}, "KR-43": {}, "KR-44": {}, + "KR-45": {}, "KR-46": {}, "KR-47": {}, "KR-48": {}, "KR-49": {}, + "KW-AH": {}, "KW-FA": {}, "KW-HA": {}, "KW-JA": {}, "KW-KU": {}, + "KW-MU": {}, "KZ-10": {}, "KZ-75": {}, "KZ-19": {}, "KZ-11": {}, + "KZ-15": {}, "KZ-71": {}, "KZ-23": {}, "KZ-27": {}, "KZ-47": {}, + "KZ-55": {}, "KZ-35": {}, "KZ-39": {}, "KZ-43": {}, "KZ-63": {}, + "KZ-79": {}, "KZ-59": {}, "KZ-61": {}, "KZ-62": {}, "KZ-31": {}, + "KZ-33": {}, "LA-AT": {}, "LA-BK": {}, "LA-BL": {}, + "LA-CH": {}, "LA-HO": {}, "LA-KH": {}, "LA-LM": {}, "LA-LP": {}, + "LA-OU": {}, "LA-PH": {}, "LA-SL": {}, "LA-SV": {}, "LA-VI": {}, + "LA-VT": {}, "LA-XA": {}, "LA-XE": {}, "LA-XI": {}, "LA-XS": {}, + "LB-AK": {}, "LB-AS": {}, "LB-BA": {}, "LB-BH": {}, "LB-BI": {}, + "LB-JA": {}, "LB-JL": {}, "LB-NA": {}, "LC-01": {}, "LC-02": {}, + "LC-03": {}, "LC-05": {}, "LC-06": {}, "LC-07": {}, "LC-08": {}, + "LC-10": {}, "LC-11": {}, "LI-01": {}, "LI-02": {}, + "LI-03": {}, "LI-04": {}, "LI-05": {}, "LI-06": {}, "LI-07": {}, + "LI-08": {}, "LI-09": {}, "LI-10": {}, "LI-11": {}, "LK-1": {}, + "LK-11": {}, "LK-12": {}, "LK-13": {}, "LK-2": {}, "LK-21": {}, + "LK-22": {}, "LK-23": {}, "LK-3": {}, "LK-31": {}, "LK-32": 
{}, + "LK-33": {}, "LK-4": {}, "LK-41": {}, "LK-42": {}, "LK-43": {}, + "LK-44": {}, "LK-45": {}, "LK-5": {}, "LK-51": {}, "LK-52": {}, + "LK-53": {}, "LK-6": {}, "LK-61": {}, "LK-62": {}, "LK-7": {}, + "LK-71": {}, "LK-72": {}, "LK-8": {}, "LK-81": {}, "LK-82": {}, + "LK-9": {}, "LK-91": {}, "LK-92": {}, "LR-BG": {}, "LR-BM": {}, + "LR-CM": {}, "LR-GB": {}, "LR-GG": {}, "LR-GK": {}, "LR-LO": {}, + "LR-MG": {}, "LR-MO": {}, "LR-MY": {}, "LR-NI": {}, "LR-RI": {}, + "LR-SI": {}, "LS-A": {}, "LS-B": {}, "LS-C": {}, "LS-D": {}, + "LS-E": {}, "LS-F": {}, "LS-G": {}, "LS-H": {}, "LS-J": {}, + "LS-K": {}, "LT-AL": {}, "LT-KL": {}, "LT-KU": {}, "LT-MR": {}, + "LT-PN": {}, "LT-SA": {}, "LT-TA": {}, "LT-TE": {}, "LT-UT": {}, + "LT-VL": {}, "LU-CA": {}, "LU-CL": {}, "LU-DI": {}, "LU-EC": {}, + "LU-ES": {}, "LU-GR": {}, "LU-LU": {}, "LU-ME": {}, "LU-RD": {}, + "LU-RM": {}, "LU-VD": {}, "LU-WI": {}, "LU-D": {}, "LU-G": {}, "LU-L": {}, + "LV-001": {}, "LV-111": {}, "LV-112": {}, "LV-113": {}, + "LV-002": {}, "LV-003": {}, "LV-004": {}, "LV-005": {}, "LV-006": {}, + "LV-007": {}, "LV-008": {}, "LV-009": {}, "LV-010": {}, "LV-011": {}, + "LV-012": {}, "LV-013": {}, "LV-014": {}, "LV-015": {}, "LV-016": {}, + "LV-017": {}, "LV-018": {}, "LV-019": {}, "LV-020": {}, "LV-021": {}, + "LV-022": {}, "LV-023": {}, "LV-024": {}, "LV-025": {}, "LV-026": {}, + "LV-027": {}, "LV-028": {}, "LV-029": {}, "LV-030": {}, "LV-031": {}, + "LV-032": {}, "LV-033": {}, "LV-034": {}, "LV-035": {}, "LV-036": {}, + "LV-037": {}, "LV-038": {}, "LV-039": {}, "LV-040": {}, "LV-041": {}, + "LV-042": {}, "LV-043": {}, "LV-044": {}, "LV-045": {}, "LV-046": {}, + "LV-047": {}, "LV-048": {}, "LV-049": {}, "LV-050": {}, "LV-051": {}, + "LV-052": {}, "LV-053": {}, "LV-054": {}, "LV-055": {}, "LV-056": {}, + "LV-057": {}, "LV-058": {}, "LV-059": {}, "LV-060": {}, "LV-061": {}, + "LV-062": {}, "LV-063": {}, "LV-064": {}, "LV-065": {}, "LV-066": {}, + "LV-067": {}, "LV-068": {}, "LV-069": {}, "LV-070": {}, "LV-071": 
{}, + "LV-072": {}, "LV-073": {}, "LV-074": {}, "LV-075": {}, "LV-076": {}, + "LV-077": {}, "LV-078": {}, "LV-079": {}, "LV-080": {}, "LV-081": {}, + "LV-082": {}, "LV-083": {}, "LV-084": {}, "LV-085": {}, "LV-086": {}, + "LV-087": {}, "LV-088": {}, "LV-089": {}, "LV-090": {}, "LV-091": {}, + "LV-092": {}, "LV-093": {}, "LV-094": {}, "LV-095": {}, "LV-096": {}, + "LV-097": {}, "LV-098": {}, "LV-099": {}, "LV-100": {}, "LV-101": {}, + "LV-102": {}, "LV-103": {}, "LV-104": {}, "LV-105": {}, "LV-106": {}, + "LV-107": {}, "LV-108": {}, "LV-109": {}, "LV-110": {}, "LV-DGV": {}, + "LV-JEL": {}, "LV-JKB": {}, "LV-JUR": {}, "LV-LPX": {}, "LV-REZ": {}, + "LV-RIX": {}, "LV-VEN": {}, "LV-VMR": {}, "LY-BA": {}, "LY-BU": {}, + "LY-DR": {}, "LY-GT": {}, "LY-JA": {}, "LY-JB": {}, "LY-JG": {}, + "LY-JI": {}, "LY-JU": {}, "LY-KF": {}, "LY-MB": {}, "LY-MI": {}, + "LY-MJ": {}, "LY-MQ": {}, "LY-NL": {}, "LY-NQ": {}, "LY-SB": {}, + "LY-SR": {}, "LY-TB": {}, "LY-WA": {}, "LY-WD": {}, "LY-WS": {}, + "LY-ZA": {}, "MA-01": {}, "MA-02": {}, "MA-03": {}, "MA-04": {}, + "MA-05": {}, "MA-06": {}, "MA-07": {}, "MA-08": {}, "MA-09": {}, + "MA-10": {}, "MA-11": {}, "MA-12": {}, "MA-13": {}, "MA-14": {}, + "MA-15": {}, "MA-16": {}, "MA-AGD": {}, "MA-AOU": {}, "MA-ASZ": {}, + "MA-AZI": {}, "MA-BEM": {}, "MA-BER": {}, "MA-BES": {}, "MA-BOD": {}, + "MA-BOM": {}, "MA-CAS": {}, "MA-CHE": {}, "MA-CHI": {}, "MA-CHT": {}, + "MA-ERR": {}, "MA-ESI": {}, "MA-ESM": {}, "MA-FAH": {}, "MA-FES": {}, + "MA-FIG": {}, "MA-GUE": {}, "MA-HAJ": {}, "MA-HAO": {}, "MA-HOC": {}, + "MA-IFR": {}, "MA-INE": {}, "MA-JDI": {}, "MA-JRA": {}, "MA-KEN": {}, + "MA-KES": {}, "MA-KHE": {}, "MA-KHN": {}, "MA-KHO": {}, "MA-LAA": {}, + "MA-LAR": {}, "MA-MED": {}, "MA-MEK": {}, "MA-MMD": {}, "MA-MMN": {}, + "MA-MOH": {}, "MA-MOU": {}, "MA-NAD": {}, "MA-NOU": {}, "MA-OUA": {}, + "MA-OUD": {}, "MA-OUJ": {}, "MA-RAB": {}, "MA-SAF": {}, "MA-SAL": {}, + "MA-SEF": {}, "MA-SET": {}, "MA-SIK": {}, "MA-SKH": {}, "MA-SYB": {}, + "MA-TAI": {}, 
"MA-TAO": {}, "MA-TAR": {}, "MA-TAT": {}, "MA-TAZ": {}, + "MA-TET": {}, "MA-TIZ": {}, "MA-TNG": {}, "MA-TNT": {}, "MA-ZAG": {}, + "MC-CL": {}, "MC-CO": {}, "MC-FO": {}, "MC-GA": {}, "MC-JE": {}, + "MC-LA": {}, "MC-MA": {}, "MC-MC": {}, "MC-MG": {}, "MC-MO": {}, + "MC-MU": {}, "MC-PH": {}, "MC-SD": {}, "MC-SO": {}, "MC-SP": {}, + "MC-SR": {}, "MC-VR": {}, "MD-AN": {}, "MD-BA": {}, "MD-BD": {}, + "MD-BR": {}, "MD-BS": {}, "MD-CA": {}, "MD-CL": {}, "MD-CM": {}, + "MD-CR": {}, "MD-CS": {}, "MD-CT": {}, "MD-CU": {}, "MD-DO": {}, + "MD-DR": {}, "MD-DU": {}, "MD-ED": {}, "MD-FA": {}, "MD-FL": {}, + "MD-GA": {}, "MD-GL": {}, "MD-HI": {}, "MD-IA": {}, "MD-LE": {}, + "MD-NI": {}, "MD-OC": {}, "MD-OR": {}, "MD-RE": {}, "MD-RI": {}, + "MD-SD": {}, "MD-SI": {}, "MD-SN": {}, "MD-SO": {}, "MD-ST": {}, + "MD-SV": {}, "MD-TA": {}, "MD-TE": {}, "MD-UN": {}, "ME-01": {}, + "ME-02": {}, "ME-03": {}, "ME-04": {}, "ME-05": {}, "ME-06": {}, + "ME-07": {}, "ME-08": {}, "ME-09": {}, "ME-10": {}, "ME-11": {}, + "ME-12": {}, "ME-13": {}, "ME-14": {}, "ME-15": {}, "ME-16": {}, + "ME-17": {}, "ME-18": {}, "ME-19": {}, "ME-20": {}, "ME-21": {}, "ME-24": {}, + "MG-A": {}, "MG-D": {}, "MG-F": {}, "MG-M": {}, "MG-T": {}, + "MG-U": {}, "MH-ALK": {}, "MH-ALL": {}, "MH-ARN": {}, "MH-AUR": {}, + "MH-EBO": {}, "MH-ENI": {}, "MH-JAB": {}, "MH-JAL": {}, "MH-KIL": {}, + "MH-KWA": {}, "MH-L": {}, "MH-LAE": {}, "MH-LIB": {}, "MH-LIK": {}, + "MH-MAJ": {}, "MH-MAL": {}, "MH-MEJ": {}, "MH-MIL": {}, "MH-NMK": {}, + "MH-NMU": {}, "MH-RON": {}, "MH-T": {}, "MH-UJA": {}, "MH-UTI": {}, + "MH-WTJ": {}, "MH-WTN": {}, "MK-101": {}, "MK-102": {}, "MK-103": {}, + "MK-104": {}, "MK-105": {}, + "MK-106": {}, "MK-107": {}, "MK-108": {}, "MK-109": {}, "MK-201": {}, + "MK-202": {}, "MK-205": {}, "MK-206": {}, "MK-207": {}, "MK-208": {}, + "MK-209": {}, "MK-210": {}, "MK-211": {}, "MK-301": {}, "MK-303": {}, + "MK-307": {}, "MK-308": {}, "MK-310": {}, "MK-311": {}, "MK-312": {}, + "MK-401": {}, "MK-402": {}, "MK-403": {}, 
"MK-404": {}, "MK-405": {}, + "MK-406": {}, "MK-408": {}, "MK-409": {}, "MK-410": {}, "MK-501": {}, + "MK-502": {}, "MK-503": {}, "MK-505": {}, "MK-506": {}, "MK-507": {}, + "MK-508": {}, "MK-509": {}, "MK-601": {}, "MK-602": {}, "MK-604": {}, + "MK-605": {}, "MK-606": {}, "MK-607": {}, "MK-608": {}, "MK-609": {}, + "MK-701": {}, "MK-702": {}, "MK-703": {}, "MK-704": {}, "MK-705": {}, + "MK-803": {}, "MK-804": {}, "MK-806": {}, "MK-807": {}, "MK-809": {}, + "MK-810": {}, "MK-811": {}, "MK-812": {}, "MK-813": {}, "MK-814": {}, + "MK-816": {}, "ML-1": {}, "ML-2": {}, "ML-3": {}, "ML-4": {}, + "ML-5": {}, "ML-6": {}, "ML-7": {}, "ML-8": {}, "ML-BKO": {}, + "MM-01": {}, "MM-02": {}, "MM-03": {}, "MM-04": {}, "MM-05": {}, + "MM-06": {}, "MM-07": {}, "MM-11": {}, "MM-12": {}, "MM-13": {}, + "MM-14": {}, "MM-15": {}, "MM-16": {}, "MM-17": {}, "MM-18": {}, "MN-035": {}, + "MN-037": {}, "MN-039": {}, "MN-041": {}, "MN-043": {}, "MN-046": {}, + "MN-047": {}, "MN-049": {}, "MN-051": {}, "MN-053": {}, "MN-055": {}, + "MN-057": {}, "MN-059": {}, "MN-061": {}, "MN-063": {}, "MN-064": {}, + "MN-065": {}, "MN-067": {}, "MN-069": {}, "MN-071": {}, "MN-073": {}, + "MN-1": {}, "MR-01": {}, "MR-02": {}, "MR-03": {}, "MR-04": {}, + "MR-05": {}, "MR-06": {}, "MR-07": {}, "MR-08": {}, "MR-09": {}, + "MR-10": {}, "MR-11": {}, "MR-12": {}, "MR-13": {}, "MR-NKC": {}, "MT-01": {}, + "MT-02": {}, "MT-03": {}, "MT-04": {}, "MT-05": {}, "MT-06": {}, + "MT-07": {}, "MT-08": {}, "MT-09": {}, "MT-10": {}, "MT-11": {}, + "MT-12": {}, "MT-13": {}, "MT-14": {}, "MT-15": {}, "MT-16": {}, + "MT-17": {}, "MT-18": {}, "MT-19": {}, "MT-20": {}, "MT-21": {}, + "MT-22": {}, "MT-23": {}, "MT-24": {}, "MT-25": {}, "MT-26": {}, + "MT-27": {}, "MT-28": {}, "MT-29": {}, "MT-30": {}, "MT-31": {}, + "MT-32": {}, "MT-33": {}, "MT-34": {}, "MT-35": {}, "MT-36": {}, + "MT-37": {}, "MT-38": {}, "MT-39": {}, "MT-40": {}, "MT-41": {}, + "MT-42": {}, "MT-43": {}, "MT-44": {}, "MT-45": {}, "MT-46": {}, + "MT-47": {}, 
"MT-48": {}, "MT-49": {}, "MT-50": {}, "MT-51": {}, + "MT-52": {}, "MT-53": {}, "MT-54": {}, "MT-55": {}, "MT-56": {}, + "MT-57": {}, "MT-58": {}, "MT-59": {}, "MT-60": {}, "MT-61": {}, + "MT-62": {}, "MT-63": {}, "MT-64": {}, "MT-65": {}, "MT-66": {}, + "MT-67": {}, "MT-68": {}, "MU-AG": {}, "MU-BL": {}, "MU-BR": {}, + "MU-CC": {}, "MU-CU": {}, "MU-FL": {}, "MU-GP": {}, "MU-MO": {}, + "MU-PA": {}, "MU-PL": {}, "MU-PU": {}, "MU-PW": {}, "MU-QB": {}, + "MU-RO": {}, "MU-RP": {}, "MU-RR": {}, "MU-SA": {}, "MU-VP": {}, "MV-00": {}, + "MV-01": {}, "MV-02": {}, "MV-03": {}, "MV-04": {}, "MV-05": {}, + "MV-07": {}, "MV-08": {}, "MV-12": {}, "MV-13": {}, "MV-14": {}, + "MV-17": {}, "MV-20": {}, "MV-23": {}, "MV-24": {}, "MV-25": {}, + "MV-26": {}, "MV-27": {}, "MV-28": {}, "MV-29": {}, "MV-CE": {}, + "MV-MLE": {}, "MV-NC": {}, "MV-NO": {}, "MV-SC": {}, "MV-SU": {}, + "MV-UN": {}, "MV-US": {}, "MW-BA": {}, "MW-BL": {}, "MW-C": {}, + "MW-CK": {}, "MW-CR": {}, "MW-CT": {}, "MW-DE": {}, "MW-DO": {}, + "MW-KR": {}, "MW-KS": {}, "MW-LI": {}, "MW-LK": {}, "MW-MC": {}, + "MW-MG": {}, "MW-MH": {}, "MW-MU": {}, "MW-MW": {}, "MW-MZ": {}, + "MW-N": {}, "MW-NB": {}, "MW-NE": {}, "MW-NI": {}, "MW-NK": {}, + "MW-NS": {}, "MW-NU": {}, "MW-PH": {}, "MW-RU": {}, "MW-S": {}, + "MW-SA": {}, "MW-TH": {}, "MW-ZO": {}, "MX-AGU": {}, "MX-BCN": {}, + "MX-BCS": {}, "MX-CAM": {}, "MX-CHH": {}, "MX-CHP": {}, "MX-COA": {}, + "MX-COL": {}, "MX-CMX": {}, "MX-DIF": {}, "MX-DUR": {}, "MX-GRO": {}, "MX-GUA": {}, + "MX-HID": {}, "MX-JAL": {}, "MX-MEX": {}, "MX-MIC": {}, "MX-MOR": {}, + "MX-NAY": {}, "MX-NLE": {}, "MX-OAX": {}, "MX-PUE": {}, "MX-QUE": {}, + "MX-ROO": {}, "MX-SIN": {}, "MX-SLP": {}, "MX-SON": {}, "MX-TAB": {}, + "MX-TAM": {}, "MX-TLA": {}, "MX-VER": {}, "MX-YUC": {}, "MX-ZAC": {}, + "MY-01": {}, "MY-02": {}, "MY-03": {}, "MY-04": {}, "MY-05": {}, + "MY-06": {}, "MY-07": {}, "MY-08": {}, "MY-09": {}, "MY-10": {}, + "MY-11": {}, "MY-12": {}, "MY-13": {}, "MY-14": {}, "MY-15": {}, + "MY-16": {}, 
"MZ-A": {}, "MZ-B": {}, "MZ-G": {}, "MZ-I": {}, + "MZ-L": {}, "MZ-MPM": {}, "MZ-N": {}, "MZ-P": {}, "MZ-Q": {}, + "MZ-S": {}, "MZ-T": {}, "NA-CA": {}, "NA-ER": {}, "NA-HA": {}, + "NA-KA": {}, "NA-KE": {}, "NA-KH": {}, "NA-KU": {}, "NA-KW": {}, "NA-OD": {}, "NA-OH": {}, + "NA-OK": {}, "NA-ON": {}, "NA-OS": {}, "NA-OT": {}, "NA-OW": {}, + "NE-1": {}, "NE-2": {}, "NE-3": {}, "NE-4": {}, "NE-5": {}, + "NE-6": {}, "NE-7": {}, "NE-8": {}, "NG-AB": {}, "NG-AD": {}, + "NG-AK": {}, "NG-AN": {}, "NG-BA": {}, "NG-BE": {}, "NG-BO": {}, + "NG-BY": {}, "NG-CR": {}, "NG-DE": {}, "NG-EB": {}, "NG-ED": {}, + "NG-EK": {}, "NG-EN": {}, "NG-FC": {}, "NG-GO": {}, "NG-IM": {}, + "NG-JI": {}, "NG-KD": {}, "NG-KE": {}, "NG-KN": {}, "NG-KO": {}, + "NG-KT": {}, "NG-KW": {}, "NG-LA": {}, "NG-NA": {}, "NG-NI": {}, + "NG-OG": {}, "NG-ON": {}, "NG-OS": {}, "NG-OY": {}, "NG-PL": {}, + "NG-RI": {}, "NG-SO": {}, "NG-TA": {}, "NG-YO": {}, "NG-ZA": {}, + "NI-AN": {}, "NI-AS": {}, "NI-BO": {}, "NI-CA": {}, "NI-CI": {}, + "NI-CO": {}, "NI-ES": {}, "NI-GR": {}, "NI-JI": {}, "NI-LE": {}, + "NI-MD": {}, "NI-MN": {}, "NI-MS": {}, "NI-MT": {}, "NI-NS": {}, + "NI-RI": {}, "NI-SJ": {}, "NL-AW": {}, "NL-BQ1": {}, "NL-BQ2": {}, + "NL-BQ3": {}, "NL-CW": {}, "NL-DR": {}, "NL-FL": {}, "NL-FR": {}, + "NL-GE": {}, "NL-GR": {}, "NL-LI": {}, "NL-NB": {}, "NL-NH": {}, + "NL-OV": {}, "NL-SX": {}, "NL-UT": {}, "NL-ZE": {}, "NL-ZH": {}, + "NO-03": {}, "NO-11": {}, "NO-15": {}, "NO-16": {}, "NO-17": {}, + "NO-18": {}, "NO-21": {}, "NO-30": {}, "NO-34": {}, "NO-38": {}, + "NO-42": {}, "NO-46": {}, "NO-50": {}, "NO-54": {}, + "NO-22": {}, "NP-1": {}, "NP-2": {}, "NP-3": {}, "NP-4": {}, + "NP-5": {}, "NP-BA": {}, "NP-BH": {}, "NP-DH": {}, "NP-GA": {}, + "NP-JA": {}, "NP-KA": {}, "NP-KO": {}, "NP-LU": {}, "NP-MA": {}, + "NP-ME": {}, "NP-NA": {}, "NP-RA": {}, "NP-SA": {}, "NP-SE": {}, + "NR-01": {}, "NR-02": {}, "NR-03": {}, "NR-04": {}, "NR-05": {}, + "NR-06": {}, "NR-07": {}, "NR-08": {}, "NR-09": {}, "NR-10": {}, + "NR-11": 
{}, "NR-12": {}, "NR-13": {}, "NR-14": {}, "NZ-AUK": {}, + "NZ-BOP": {}, "NZ-CAN": {}, "NZ-CIT": {}, "NZ-GIS": {}, "NZ-HKB": {}, + "NZ-MBH": {}, "NZ-MWT": {}, "NZ-N": {}, "NZ-NSN": {}, "NZ-NTL": {}, + "NZ-OTA": {}, "NZ-S": {}, "NZ-STL": {}, "NZ-TAS": {}, "NZ-TKI": {}, + "NZ-WGN": {}, "NZ-WKO": {}, "NZ-WTC": {}, "OM-BA": {}, "OM-BS": {}, "OM-BU": {}, "OM-BJ": {}, + "OM-DA": {}, "OM-MA": {}, "OM-MU": {}, "OM-SH": {}, "OM-SJ": {}, "OM-SS": {}, "OM-WU": {}, + "OM-ZA": {}, "OM-ZU": {}, "PA-1": {}, "PA-2": {}, "PA-3": {}, + "PA-4": {}, "PA-5": {}, "PA-6": {}, "PA-7": {}, "PA-8": {}, + "PA-9": {}, "PA-EM": {}, "PA-KY": {}, "PA-NB": {}, "PE-AMA": {}, + "PE-ANC": {}, "PE-APU": {}, "PE-ARE": {}, "PE-AYA": {}, "PE-CAJ": {}, + "PE-CAL": {}, "PE-CUS": {}, "PE-HUC": {}, "PE-HUV": {}, "PE-ICA": {}, + "PE-JUN": {}, "PE-LAL": {}, "PE-LAM": {}, "PE-LIM": {}, "PE-LMA": {}, + "PE-LOR": {}, "PE-MDD": {}, "PE-MOQ": {}, "PE-PAS": {}, "PE-PIU": {}, + "PE-PUN": {}, "PE-SAM": {}, "PE-TAC": {}, "PE-TUM": {}, "PE-UCA": {}, + "PG-CPK": {}, "PG-CPM": {}, "PG-EBR": {}, "PG-EHG": {}, "PG-EPW": {}, + "PG-ESW": {}, "PG-GPK": {}, "PG-MBA": {}, "PG-MPL": {}, "PG-MPM": {}, + "PG-MRL": {}, "PG-NCD": {}, "PG-NIK": {}, "PG-NPP": {}, "PG-NSB": {}, + "PG-SAN": {}, "PG-SHM": {}, "PG-WBK": {}, "PG-WHM": {}, "PG-WPD": {}, + "PH-00": {}, "PH-01": {}, "PH-02": {}, "PH-03": {}, "PH-05": {}, + "PH-06": {}, "PH-07": {}, "PH-08": {}, "PH-09": {}, "PH-10": {}, + "PH-11": {}, "PH-12": {}, "PH-13": {}, "PH-14": {}, "PH-15": {}, + "PH-40": {}, "PH-41": {}, "PH-ABR": {}, "PH-AGN": {}, "PH-AGS": {}, + "PH-AKL": {}, "PH-ALB": {}, "PH-ANT": {}, "PH-APA": {}, "PH-AUR": {}, + "PH-BAN": {}, "PH-BAS": {}, "PH-BEN": {}, "PH-BIL": {}, "PH-BOH": {}, + "PH-BTG": {}, "PH-BTN": {}, "PH-BUK": {}, "PH-BUL": {}, "PH-CAG": {}, + "PH-CAM": {}, "PH-CAN": {}, "PH-CAP": {}, "PH-CAS": {}, "PH-CAT": {}, + "PH-CAV": {}, "PH-CEB": {}, "PH-COM": {}, "PH-DAO": {}, "PH-DAS": {}, + "PH-DAV": {}, "PH-DIN": {}, "PH-EAS": {}, "PH-GUI": {}, "PH-IFU": 
{}, + "PH-ILI": {}, "PH-ILN": {}, "PH-ILS": {}, "PH-ISA": {}, "PH-KAL": {}, + "PH-LAG": {}, "PH-LAN": {}, "PH-LAS": {}, "PH-LEY": {}, "PH-LUN": {}, + "PH-MAD": {}, "PH-MAG": {}, "PH-MAS": {}, "PH-MDC": {}, "PH-MDR": {}, + "PH-MOU": {}, "PH-MSC": {}, "PH-MSR": {}, "PH-NCO": {}, "PH-NEC": {}, + "PH-NER": {}, "PH-NSA": {}, "PH-NUE": {}, "PH-NUV": {}, "PH-PAM": {}, + "PH-PAN": {}, "PH-PLW": {}, "PH-QUE": {}, "PH-QUI": {}, "PH-RIZ": {}, + "PH-ROM": {}, "PH-SAR": {}, "PH-SCO": {}, "PH-SIG": {}, "PH-SLE": {}, + "PH-SLU": {}, "PH-SOR": {}, "PH-SUK": {}, "PH-SUN": {}, "PH-SUR": {}, + "PH-TAR": {}, "PH-TAW": {}, "PH-WSA": {}, "PH-ZAN": {}, "PH-ZAS": {}, + "PH-ZMB": {}, "PH-ZSI": {}, "PK-BA": {}, "PK-GB": {}, "PK-IS": {}, + "PK-JK": {}, "PK-KP": {}, "PK-PB": {}, "PK-SD": {}, "PK-TA": {}, + "PL-02": {}, "PL-04": {}, "PL-06": {}, "PL-08": {}, "PL-10": {}, + "PL-12": {}, "PL-14": {}, "PL-16": {}, "PL-18": {}, "PL-20": {}, + "PL-22": {}, "PL-24": {}, "PL-26": {}, "PL-28": {}, "PL-30": {}, "PL-32": {}, + "PS-BTH": {}, "PS-DEB": {}, "PS-GZA": {}, "PS-HBN": {}, + "PS-JEM": {}, "PS-JEN": {}, "PS-JRH": {}, "PS-KYS": {}, "PS-NBS": {}, + "PS-NGZ": {}, "PS-QQA": {}, "PS-RBH": {}, "PS-RFH": {}, "PS-SLT": {}, + "PS-TBS": {}, "PS-TKM": {}, "PT-01": {}, "PT-02": {}, "PT-03": {}, + "PT-04": {}, "PT-05": {}, "PT-06": {}, "PT-07": {}, "PT-08": {}, + "PT-09": {}, "PT-10": {}, "PT-11": {}, "PT-12": {}, "PT-13": {}, + "PT-14": {}, "PT-15": {}, "PT-16": {}, "PT-17": {}, "PT-18": {}, + "PT-20": {}, "PT-30": {}, "PW-002": {}, "PW-004": {}, "PW-010": {}, + "PW-050": {}, "PW-100": {}, "PW-150": {}, "PW-212": {}, "PW-214": {}, + "PW-218": {}, "PW-222": {}, "PW-224": {}, "PW-226": {}, "PW-227": {}, + "PW-228": {}, "PW-350": {}, "PW-370": {}, "PY-1": {}, "PY-10": {}, + "PY-11": {}, "PY-12": {}, "PY-13": {}, "PY-14": {}, "PY-15": {}, + "PY-16": {}, "PY-19": {}, "PY-2": {}, "PY-3": {}, "PY-4": {}, + "PY-5": {}, "PY-6": {}, "PY-7": {}, "PY-8": {}, "PY-9": {}, + "PY-ASU": {}, "QA-DA": {}, "QA-KH": {}, 
"QA-MS": {}, "QA-RA": {}, + "QA-US": {}, "QA-WA": {}, "QA-ZA": {}, "RO-AB": {}, "RO-AG": {}, + "RO-AR": {}, "RO-B": {}, "RO-BC": {}, "RO-BH": {}, "RO-BN": {}, + "RO-BR": {}, "RO-BT": {}, "RO-BV": {}, "RO-BZ": {}, "RO-CJ": {}, + "RO-CL": {}, "RO-CS": {}, "RO-CT": {}, "RO-CV": {}, "RO-DB": {}, + "RO-DJ": {}, "RO-GJ": {}, "RO-GL": {}, "RO-GR": {}, "RO-HD": {}, + "RO-HR": {}, "RO-IF": {}, "RO-IL": {}, "RO-IS": {}, "RO-MH": {}, + "RO-MM": {}, "RO-MS": {}, "RO-NT": {}, "RO-OT": {}, "RO-PH": {}, + "RO-SB": {}, "RO-SJ": {}, "RO-SM": {}, "RO-SV": {}, "RO-TL": {}, + "RO-TM": {}, "RO-TR": {}, "RO-VL": {}, "RO-VN": {}, "RO-VS": {}, + "RS-00": {}, "RS-01": {}, "RS-02": {}, "RS-03": {}, "RS-04": {}, + "RS-05": {}, "RS-06": {}, "RS-07": {}, "RS-08": {}, "RS-09": {}, + "RS-10": {}, "RS-11": {}, "RS-12": {}, "RS-13": {}, "RS-14": {}, + "RS-15": {}, "RS-16": {}, "RS-17": {}, "RS-18": {}, "RS-19": {}, + "RS-20": {}, "RS-21": {}, "RS-22": {}, "RS-23": {}, "RS-24": {}, + "RS-25": {}, "RS-26": {}, "RS-27": {}, "RS-28": {}, "RS-29": {}, + "RS-KM": {}, "RS-VO": {}, "RU-AD": {}, "RU-AL": {}, "RU-ALT": {}, + "RU-AMU": {}, "RU-ARK": {}, "RU-AST": {}, "RU-BA": {}, "RU-BEL": {}, + "RU-BRY": {}, "RU-BU": {}, "RU-CE": {}, "RU-CHE": {}, "RU-CHU": {}, + "RU-CU": {}, "RU-DA": {}, "RU-IN": {}, "RU-IRK": {}, "RU-IVA": {}, + "RU-KAM": {}, "RU-KB": {}, "RU-KC": {}, "RU-KDA": {}, "RU-KEM": {}, + "RU-KGD": {}, "RU-KGN": {}, "RU-KHA": {}, "RU-KHM": {}, "RU-KIR": {}, + "RU-KK": {}, "RU-KL": {}, "RU-KLU": {}, "RU-KO": {}, "RU-KOS": {}, + "RU-KR": {}, "RU-KRS": {}, "RU-KYA": {}, "RU-LEN": {}, "RU-LIP": {}, + "RU-MAG": {}, "RU-ME": {}, "RU-MO": {}, "RU-MOS": {}, "RU-MOW": {}, + "RU-MUR": {}, "RU-NEN": {}, "RU-NGR": {}, "RU-NIZ": {}, "RU-NVS": {}, + "RU-OMS": {}, "RU-ORE": {}, "RU-ORL": {}, "RU-PER": {}, "RU-PNZ": {}, + "RU-PRI": {}, "RU-PSK": {}, "RU-ROS": {}, "RU-RYA": {}, "RU-SA": {}, + "RU-SAK": {}, "RU-SAM": {}, "RU-SAR": {}, "RU-SE": {}, "RU-SMO": {}, + "RU-SPE": {}, "RU-STA": {}, "RU-SVE": {}, "RU-TA": 
{}, "RU-TAM": {}, + "RU-TOM": {}, "RU-TUL": {}, "RU-TVE": {}, "RU-TY": {}, "RU-TYU": {}, + "RU-UD": {}, "RU-ULY": {}, "RU-VGG": {}, "RU-VLA": {}, "RU-VLG": {}, + "RU-VOR": {}, "RU-YAN": {}, "RU-YAR": {}, "RU-YEV": {}, "RU-ZAB": {}, + "RW-01": {}, "RW-02": {}, "RW-03": {}, "RW-04": {}, "RW-05": {}, + "SA-01": {}, "SA-02": {}, "SA-03": {}, "SA-04": {}, "SA-05": {}, + "SA-06": {}, "SA-07": {}, "SA-08": {}, "SA-09": {}, "SA-10": {}, + "SA-11": {}, "SA-12": {}, "SA-14": {}, "SB-CE": {}, "SB-CH": {}, + "SB-CT": {}, "SB-GU": {}, "SB-IS": {}, "SB-MK": {}, "SB-ML": {}, + "SB-RB": {}, "SB-TE": {}, "SB-WE": {}, "SC-01": {}, "SC-02": {}, + "SC-03": {}, "SC-04": {}, "SC-05": {}, "SC-06": {}, "SC-07": {}, + "SC-08": {}, "SC-09": {}, "SC-10": {}, "SC-11": {}, "SC-12": {}, + "SC-13": {}, "SC-14": {}, "SC-15": {}, "SC-16": {}, "SC-17": {}, + "SC-18": {}, "SC-19": {}, "SC-20": {}, "SC-21": {}, "SC-22": {}, + "SC-23": {}, "SC-24": {}, "SC-25": {}, "SD-DC": {}, "SD-DE": {}, + "SD-DN": {}, "SD-DS": {}, "SD-DW": {}, "SD-GD": {}, "SD-GK": {}, "SD-GZ": {}, + "SD-KA": {}, "SD-KH": {}, "SD-KN": {}, "SD-KS": {}, "SD-NB": {}, + "SD-NO": {}, "SD-NR": {}, "SD-NW": {}, "SD-RS": {}, "SD-SI": {}, + "SE-AB": {}, "SE-AC": {}, "SE-BD": {}, "SE-C": {}, "SE-D": {}, + "SE-E": {}, "SE-F": {}, "SE-G": {}, "SE-H": {}, "SE-I": {}, + "SE-K": {}, "SE-M": {}, "SE-N": {}, "SE-O": {}, "SE-S": {}, + "SE-T": {}, "SE-U": {}, "SE-W": {}, "SE-X": {}, "SE-Y": {}, + "SE-Z": {}, "SG-01": {}, "SG-02": {}, "SG-03": {}, "SG-04": {}, + "SG-05": {}, "SH-AC": {}, "SH-HL": {}, "SH-TA": {}, "SI-001": {}, + "SI-002": {}, "SI-003": {}, "SI-004": {}, "SI-005": {}, "SI-006": {}, + "SI-007": {}, "SI-008": {}, "SI-009": {}, "SI-010": {}, "SI-011": {}, + "SI-012": {}, "SI-013": {}, "SI-014": {}, "SI-015": {}, "SI-016": {}, + "SI-017": {}, "SI-018": {}, "SI-019": {}, "SI-020": {}, "SI-021": {}, + "SI-022": {}, "SI-023": {}, "SI-024": {}, "SI-025": {}, "SI-026": {}, + "SI-027": {}, "SI-028": {}, "SI-029": {}, "SI-030": {}, "SI-031": {}, 
+ "SI-032": {}, "SI-033": {}, "SI-034": {}, "SI-035": {}, "SI-036": {}, + "SI-037": {}, "SI-038": {}, "SI-039": {}, "SI-040": {}, "SI-041": {}, + "SI-042": {}, "SI-043": {}, "SI-044": {}, "SI-045": {}, "SI-046": {}, + "SI-047": {}, "SI-048": {}, "SI-049": {}, "SI-050": {}, "SI-051": {}, + "SI-052": {}, "SI-053": {}, "SI-054": {}, "SI-055": {}, "SI-056": {}, + "SI-057": {}, "SI-058": {}, "SI-059": {}, "SI-060": {}, "SI-061": {}, + "SI-062": {}, "SI-063": {}, "SI-064": {}, "SI-065": {}, "SI-066": {}, + "SI-067": {}, "SI-068": {}, "SI-069": {}, "SI-070": {}, "SI-071": {}, + "SI-072": {}, "SI-073": {}, "SI-074": {}, "SI-075": {}, "SI-076": {}, + "SI-077": {}, "SI-078": {}, "SI-079": {}, "SI-080": {}, "SI-081": {}, + "SI-082": {}, "SI-083": {}, "SI-084": {}, "SI-085": {}, "SI-086": {}, + "SI-087": {}, "SI-088": {}, "SI-089": {}, "SI-090": {}, "SI-091": {}, + "SI-092": {}, "SI-093": {}, "SI-094": {}, "SI-095": {}, "SI-096": {}, + "SI-097": {}, "SI-098": {}, "SI-099": {}, "SI-100": {}, "SI-101": {}, + "SI-102": {}, "SI-103": {}, "SI-104": {}, "SI-105": {}, "SI-106": {}, + "SI-107": {}, "SI-108": {}, "SI-109": {}, "SI-110": {}, "SI-111": {}, + "SI-112": {}, "SI-113": {}, "SI-114": {}, "SI-115": {}, "SI-116": {}, + "SI-117": {}, "SI-118": {}, "SI-119": {}, "SI-120": {}, "SI-121": {}, + "SI-122": {}, "SI-123": {}, "SI-124": {}, "SI-125": {}, "SI-126": {}, + "SI-127": {}, "SI-128": {}, "SI-129": {}, "SI-130": {}, "SI-131": {}, + "SI-132": {}, "SI-133": {}, "SI-134": {}, "SI-135": {}, "SI-136": {}, + "SI-137": {}, "SI-138": {}, "SI-139": {}, "SI-140": {}, "SI-141": {}, + "SI-142": {}, "SI-143": {}, "SI-144": {}, "SI-146": {}, "SI-147": {}, + "SI-148": {}, "SI-149": {}, "SI-150": {}, "SI-151": {}, "SI-152": {}, + "SI-153": {}, "SI-154": {}, "SI-155": {}, "SI-156": {}, "SI-157": {}, + "SI-158": {}, "SI-159": {}, "SI-160": {}, "SI-161": {}, "SI-162": {}, + "SI-163": {}, "SI-164": {}, "SI-165": {}, "SI-166": {}, "SI-167": {}, + "SI-168": {}, "SI-169": {}, "SI-170": {}, "SI-171": 
{}, "SI-172": {}, + "SI-173": {}, "SI-174": {}, "SI-175": {}, "SI-176": {}, "SI-177": {}, + "SI-178": {}, "SI-179": {}, "SI-180": {}, "SI-181": {}, "SI-182": {}, + "SI-183": {}, "SI-184": {}, "SI-185": {}, "SI-186": {}, "SI-187": {}, + "SI-188": {}, "SI-189": {}, "SI-190": {}, "SI-191": {}, "SI-192": {}, + "SI-193": {}, "SI-194": {}, "SI-195": {}, "SI-196": {}, "SI-197": {}, + "SI-198": {}, "SI-199": {}, "SI-200": {}, "SI-201": {}, "SI-202": {}, + "SI-203": {}, "SI-204": {}, "SI-205": {}, "SI-206": {}, "SI-207": {}, + "SI-208": {}, "SI-209": {}, "SI-210": {}, "SI-211": {}, "SI-212": {}, "SI-213": {}, "SK-BC": {}, + "SK-BL": {}, "SK-KI": {}, "SK-NI": {}, "SK-PV": {}, "SK-TA": {}, + "SK-TC": {}, "SK-ZI": {}, "SL-E": {}, "SL-N": {}, "SL-S": {}, + "SL-W": {}, "SM-01": {}, "SM-02": {}, "SM-03": {}, "SM-04": {}, + "SM-05": {}, "SM-06": {}, "SM-07": {}, "SM-08": {}, "SM-09": {}, + "SN-DB": {}, "SN-DK": {}, "SN-FK": {}, "SN-KA": {}, "SN-KD": {}, + "SN-KE": {}, "SN-KL": {}, "SN-LG": {}, "SN-MT": {}, "SN-SE": {}, + "SN-SL": {}, "SN-TC": {}, "SN-TH": {}, "SN-ZG": {}, "SO-AW": {}, + "SO-BK": {}, "SO-BN": {}, "SO-BR": {}, "SO-BY": {}, "SO-GA": {}, + "SO-GE": {}, "SO-HI": {}, "SO-JD": {}, "SO-JH": {}, "SO-MU": {}, + "SO-NU": {}, "SO-SA": {}, "SO-SD": {}, "SO-SH": {}, "SO-SO": {}, + "SO-TO": {}, "SO-WO": {}, "SR-BR": {}, "SR-CM": {}, "SR-CR": {}, + "SR-MA": {}, "SR-NI": {}, "SR-PM": {}, "SR-PR": {}, "SR-SA": {}, + "SR-SI": {}, "SR-WA": {}, "SS-BN": {}, "SS-BW": {}, "SS-EC": {}, + "SS-EE8": {}, "SS-EE": {}, "SS-EW": {}, "SS-JG": {}, "SS-LK": {}, "SS-NU": {}, + "SS-UY": {}, "SS-WR": {}, "ST-01": {}, "ST-P": {}, "ST-S": {}, "SV-AH": {}, + "SV-CA": {}, "SV-CH": {}, "SV-CU": {}, "SV-LI": {}, "SV-MO": {}, + "SV-PA": {}, "SV-SA": {}, "SV-SM": {}, "SV-SO": {}, "SV-SS": {}, + "SV-SV": {}, "SV-UN": {}, "SV-US": {}, "SY-DI": {}, "SY-DR": {}, + "SY-DY": {}, "SY-HA": {}, "SY-HI": {}, "SY-HL": {}, "SY-HM": {}, + "SY-ID": {}, "SY-LA": {}, "SY-QU": {}, "SY-RA": {}, "SY-RD": {}, + "SY-SU": {}, 
"SY-TA": {}, "SZ-HH": {}, "SZ-LU": {}, "SZ-MA": {}, + "SZ-SH": {}, "TD-BA": {}, "TD-BG": {}, "TD-BO": {}, "TD-CB": {}, + "TD-EN": {}, "TD-GR": {}, "TD-HL": {}, "TD-KA": {}, "TD-LC": {}, + "TD-LO": {}, "TD-LR": {}, "TD-MA": {}, "TD-MC": {}, "TD-ME": {}, + "TD-MO": {}, "TD-ND": {}, "TD-OD": {}, "TD-SA": {}, "TD-SI": {}, + "TD-TA": {}, "TD-TI": {}, "TD-WF": {}, "TG-C": {}, "TG-K": {}, + "TG-M": {}, "TG-P": {}, "TG-S": {}, "TH-10": {}, "TH-11": {}, + "TH-12": {}, "TH-13": {}, "TH-14": {}, "TH-15": {}, "TH-16": {}, + "TH-17": {}, "TH-18": {}, "TH-19": {}, "TH-20": {}, "TH-21": {}, + "TH-22": {}, "TH-23": {}, "TH-24": {}, "TH-25": {}, "TH-26": {}, + "TH-27": {}, "TH-30": {}, "TH-31": {}, "TH-32": {}, "TH-33": {}, + "TH-34": {}, "TH-35": {}, "TH-36": {}, "TH-37": {}, "TH-38": {}, "TH-39": {}, + "TH-40": {}, "TH-41": {}, "TH-42": {}, "TH-43": {}, "TH-44": {}, + "TH-45": {}, "TH-46": {}, "TH-47": {}, "TH-48": {}, "TH-49": {}, + "TH-50": {}, "TH-51": {}, "TH-52": {}, "TH-53": {}, "TH-54": {}, + "TH-55": {}, "TH-56": {}, "TH-57": {}, "TH-58": {}, "TH-60": {}, + "TH-61": {}, "TH-62": {}, "TH-63": {}, "TH-64": {}, "TH-65": {}, + "TH-66": {}, "TH-67": {}, "TH-70": {}, "TH-71": {}, "TH-72": {}, + "TH-73": {}, "TH-74": {}, "TH-75": {}, "TH-76": {}, "TH-77": {}, + "TH-80": {}, "TH-81": {}, "TH-82": {}, "TH-83": {}, "TH-84": {}, + "TH-85": {}, "TH-86": {}, "TH-90": {}, "TH-91": {}, "TH-92": {}, + "TH-93": {}, "TH-94": {}, "TH-95": {}, "TH-96": {}, "TH-S": {}, + "TJ-GB": {}, "TJ-KT": {}, "TJ-SU": {}, "TJ-DU": {}, "TJ-RA": {}, "TL-AL": {}, "TL-AN": {}, + "TL-BA": {}, "TL-BO": {}, "TL-CO": {}, "TL-DI": {}, "TL-ER": {}, + "TL-LA": {}, "TL-LI": {}, "TL-MF": {}, "TL-MT": {}, "TL-OE": {}, + "TL-VI": {}, "TM-A": {}, "TM-B": {}, "TM-D": {}, "TM-L": {}, + "TM-M": {}, "TM-S": {}, "TN-11": {}, "TN-12": {}, "TN-13": {}, + "TN-14": {}, "TN-21": {}, "TN-22": {}, "TN-23": {}, "TN-31": {}, + "TN-32": {}, "TN-33": {}, "TN-34": {}, "TN-41": {}, "TN-42": {}, + "TN-43": {}, "TN-51": {}, "TN-52": {}, 
"TN-53": {}, "TN-61": {}, + "TN-71": {}, "TN-72": {}, "TN-73": {}, "TN-81": {}, "TN-82": {}, + "TN-83": {}, "TO-01": {}, "TO-02": {}, "TO-03": {}, "TO-04": {}, + "TO-05": {}, "TR-01": {}, "TR-02": {}, "TR-03": {}, "TR-04": {}, + "TR-05": {}, "TR-06": {}, "TR-07": {}, "TR-08": {}, "TR-09": {}, + "TR-10": {}, "TR-11": {}, "TR-12": {}, "TR-13": {}, "TR-14": {}, + "TR-15": {}, "TR-16": {}, "TR-17": {}, "TR-18": {}, "TR-19": {}, + "TR-20": {}, "TR-21": {}, "TR-22": {}, "TR-23": {}, "TR-24": {}, + "TR-25": {}, "TR-26": {}, "TR-27": {}, "TR-28": {}, "TR-29": {}, + "TR-30": {}, "TR-31": {}, "TR-32": {}, "TR-33": {}, "TR-34": {}, + "TR-35": {}, "TR-36": {}, "TR-37": {}, "TR-38": {}, "TR-39": {}, + "TR-40": {}, "TR-41": {}, "TR-42": {}, "TR-43": {}, "TR-44": {}, + "TR-45": {}, "TR-46": {}, "TR-47": {}, "TR-48": {}, "TR-49": {}, + "TR-50": {}, "TR-51": {}, "TR-52": {}, "TR-53": {}, "TR-54": {}, + "TR-55": {}, "TR-56": {}, "TR-57": {}, "TR-58": {}, "TR-59": {}, + "TR-60": {}, "TR-61": {}, "TR-62": {}, "TR-63": {}, "TR-64": {}, + "TR-65": {}, "TR-66": {}, "TR-67": {}, "TR-68": {}, "TR-69": {}, + "TR-70": {}, "TR-71": {}, "TR-72": {}, "TR-73": {}, "TR-74": {}, + "TR-75": {}, "TR-76": {}, "TR-77": {}, "TR-78": {}, "TR-79": {}, + "TR-80": {}, "TR-81": {}, "TT-ARI": {}, "TT-CHA": {}, "TT-CTT": {}, + "TT-DMN": {}, "TT-ETO": {}, "TT-MRC": {}, "TT-TOB": {}, "TT-PED": {}, "TT-POS": {}, "TT-PRT": {}, + "TT-PTF": {}, "TT-RCM": {}, "TT-SFO": {}, "TT-SGE": {}, "TT-SIP": {}, + "TT-SJL": {}, "TT-TUP": {}, "TT-WTO": {}, "TV-FUN": {}, "TV-NIT": {}, + "TV-NKF": {}, "TV-NKL": {}, "TV-NMA": {}, "TV-NMG": {}, "TV-NUI": {}, + "TV-VAI": {}, "TW-CHA": {}, "TW-CYI": {}, "TW-CYQ": {}, "TW-KIN": {}, "TW-HSQ": {}, + "TW-HSZ": {}, "TW-HUA": {}, "TW-LIE": {}, "TW-ILA": {}, "TW-KEE": {}, "TW-KHH": {}, + "TW-KHQ": {}, "TW-MIA": {}, "TW-NAN": {}, "TW-NWT": {}, "TW-PEN": {}, "TW-PIF": {}, + "TW-TAO": {}, "TW-TNN": {}, "TW-TNQ": {}, "TW-TPE": {}, "TW-TPQ": {}, + "TW-TTT": {}, "TW-TXG": {}, "TW-TXQ": {}, 
"TW-YUN": {}, "TZ-01": {}, + "TZ-02": {}, "TZ-03": {}, "TZ-04": {}, "TZ-05": {}, "TZ-06": {}, + "TZ-07": {}, "TZ-08": {}, "TZ-09": {}, "TZ-10": {}, "TZ-11": {}, + "TZ-12": {}, "TZ-13": {}, "TZ-14": {}, "TZ-15": {}, "TZ-16": {}, + "TZ-17": {}, "TZ-18": {}, "TZ-19": {}, "TZ-20": {}, "TZ-21": {}, + "TZ-22": {}, "TZ-23": {}, "TZ-24": {}, "TZ-25": {}, "TZ-26": {}, "TZ-27": {}, "TZ-28": {}, "TZ-29": {}, "TZ-30": {}, "TZ-31": {}, + "UA-05": {}, "UA-07": {}, "UA-09": {}, "UA-12": {}, "UA-14": {}, + "UA-18": {}, "UA-21": {}, "UA-23": {}, "UA-26": {}, "UA-30": {}, + "UA-32": {}, "UA-35": {}, "UA-40": {}, "UA-43": {}, "UA-46": {}, + "UA-48": {}, "UA-51": {}, "UA-53": {}, "UA-56": {}, "UA-59": {}, + "UA-61": {}, "UA-63": {}, "UA-65": {}, "UA-68": {}, "UA-71": {}, + "UA-74": {}, "UA-77": {}, "UG-101": {}, "UG-102": {}, "UG-103": {}, + "UG-104": {}, "UG-105": {}, "UG-106": {}, "UG-107": {}, "UG-108": {}, + "UG-109": {}, "UG-110": {}, "UG-111": {}, "UG-112": {}, "UG-113": {}, + "UG-114": {}, "UG-115": {}, "UG-116": {}, "UG-201": {}, "UG-202": {}, + "UG-203": {}, "UG-204": {}, "UG-205": {}, "UG-206": {}, "UG-207": {}, + "UG-208": {}, "UG-209": {}, "UG-210": {}, "UG-211": {}, "UG-212": {}, + "UG-213": {}, "UG-214": {}, "UG-215": {}, "UG-216": {}, "UG-217": {}, + "UG-218": {}, "UG-219": {}, "UG-220": {}, "UG-221": {}, "UG-222": {}, + "UG-223": {}, "UG-224": {}, "UG-301": {}, "UG-302": {}, "UG-303": {}, + "UG-304": {}, "UG-305": {}, "UG-306": {}, "UG-307": {}, "UG-308": {}, + "UG-309": {}, "UG-310": {}, "UG-311": {}, "UG-312": {}, "UG-313": {}, + "UG-314": {}, "UG-315": {}, "UG-316": {}, "UG-317": {}, "UG-318": {}, + "UG-319": {}, "UG-320": {}, "UG-321": {}, "UG-401": {}, "UG-402": {}, + "UG-403": {}, "UG-404": {}, "UG-405": {}, "UG-406": {}, "UG-407": {}, + "UG-408": {}, "UG-409": {}, "UG-410": {}, "UG-411": {}, "UG-412": {}, + "UG-413": {}, "UG-414": {}, "UG-415": {}, "UG-416": {}, "UG-417": {}, + "UG-418": {}, "UG-419": {}, "UG-C": {}, "UG-E": {}, "UG-N": {}, + "UG-W": {}, 
"UG-322": {}, "UG-323": {}, "UG-420": {}, "UG-117": {}, + "UG-118": {}, "UG-225": {}, "UG-120": {}, "UG-226": {}, + "UG-121": {}, "UG-122": {}, "UG-227": {}, "UG-421": {}, + "UG-325": {}, "UG-228": {}, "UG-123": {}, "UG-422": {}, + "UG-326": {}, "UG-229": {}, "UG-124": {}, "UG-423": {}, + "UG-230": {}, "UG-327": {}, "UG-424": {}, "UG-328": {}, + "UG-425": {}, "UG-426": {}, "UG-330": {}, + "UM-67": {}, "UM-71": {}, "UM-76": {}, "UM-79": {}, + "UM-81": {}, "UM-84": {}, "UM-86": {}, "UM-89": {}, "UM-95": {}, + "US-AK": {}, "US-AL": {}, "US-AR": {}, "US-AS": {}, "US-AZ": {}, + "US-CA": {}, "US-CO": {}, "US-CT": {}, "US-DC": {}, "US-DE": {}, + "US-FL": {}, "US-GA": {}, "US-GU": {}, "US-HI": {}, "US-IA": {}, + "US-ID": {}, "US-IL": {}, "US-IN": {}, "US-KS": {}, "US-KY": {}, + "US-LA": {}, "US-MA": {}, "US-MD": {}, "US-ME": {}, "US-MI": {}, + "US-MN": {}, "US-MO": {}, "US-MP": {}, "US-MS": {}, "US-MT": {}, + "US-NC": {}, "US-ND": {}, "US-NE": {}, "US-NH": {}, "US-NJ": {}, + "US-NM": {}, "US-NV": {}, "US-NY": {}, "US-OH": {}, "US-OK": {}, + "US-OR": {}, "US-PA": {}, "US-PR": {}, "US-RI": {}, "US-SC": {}, + "US-SD": {}, "US-TN": {}, "US-TX": {}, "US-UM": {}, "US-UT": {}, + "US-VA": {}, "US-VI": {}, "US-VT": {}, "US-WA": {}, "US-WI": {}, + "US-WV": {}, "US-WY": {}, "UY-AR": {}, "UY-CA": {}, "UY-CL": {}, + "UY-CO": {}, "UY-DU": {}, "UY-FD": {}, "UY-FS": {}, "UY-LA": {}, + "UY-MA": {}, "UY-MO": {}, "UY-PA": {}, "UY-RN": {}, "UY-RO": {}, + "UY-RV": {}, "UY-SA": {}, "UY-SJ": {}, "UY-SO": {}, "UY-TA": {}, + "UY-TT": {}, "UZ-AN": {}, "UZ-BU": {}, "UZ-FA": {}, "UZ-JI": {}, + "UZ-NG": {}, "UZ-NW": {}, "UZ-QA": {}, "UZ-QR": {}, "UZ-SA": {}, + "UZ-SI": {}, "UZ-SU": {}, "UZ-TK": {}, "UZ-TO": {}, "UZ-XO": {}, + "VC-01": {}, "VC-02": {}, "VC-03": {}, "VC-04": {}, "VC-05": {}, + "VC-06": {}, "VE-A": {}, "VE-B": {}, "VE-C": {}, "VE-D": {}, + "VE-E": {}, "VE-F": {}, "VE-G": {}, "VE-H": {}, "VE-I": {}, + "VE-J": {}, "VE-K": {}, "VE-L": {}, "VE-M": {}, "VE-N": {}, + "VE-O": {}, "VE-P": {}, 
"VE-R": {}, "VE-S": {}, "VE-T": {}, + "VE-U": {}, "VE-V": {}, "VE-W": {}, "VE-X": {}, "VE-Y": {}, + "VE-Z": {}, "VN-01": {}, "VN-02": {}, "VN-03": {}, "VN-04": {}, + "VN-05": {}, "VN-06": {}, "VN-07": {}, "VN-09": {}, "VN-13": {}, + "VN-14": {}, "VN-15": {}, "VN-18": {}, "VN-20": {}, "VN-21": {}, + "VN-22": {}, "VN-23": {}, "VN-24": {}, "VN-25": {}, "VN-26": {}, + "VN-27": {}, "VN-28": {}, "VN-29": {}, "VN-30": {}, "VN-31": {}, + "VN-32": {}, "VN-33": {}, "VN-34": {}, "VN-35": {}, "VN-36": {}, + "VN-37": {}, "VN-39": {}, "VN-40": {}, "VN-41": {}, "VN-43": {}, + "VN-44": {}, "VN-45": {}, "VN-46": {}, "VN-47": {}, "VN-49": {}, + "VN-50": {}, "VN-51": {}, "VN-52": {}, "VN-53": {}, "VN-54": {}, + "VN-55": {}, "VN-56": {}, "VN-57": {}, "VN-58": {}, "VN-59": {}, + "VN-61": {}, "VN-63": {}, "VN-66": {}, "VN-67": {}, "VN-68": {}, + "VN-69": {}, "VN-70": {}, "VN-71": {}, "VN-72": {}, "VN-73": {}, + "VN-CT": {}, "VN-DN": {}, "VN-HN": {}, "VN-HP": {}, "VN-SG": {}, + "VU-MAP": {}, "VU-PAM": {}, "VU-SAM": {}, "VU-SEE": {}, "VU-TAE": {}, + "VU-TOB": {}, "WF-SG": {}, "WF-UV": {}, "WS-AA": {}, "WS-AL": {}, "WS-AT": {}, "WS-FA": {}, + "WS-GE": {}, "WS-GI": {}, "WS-PA": {}, "WS-SA": {}, "WS-TU": {}, + "WS-VF": {}, "WS-VS": {}, "YE-AB": {}, "YE-AD": {}, "YE-AM": {}, + "YE-BA": {}, "YE-DA": {}, "YE-DH": {}, "YE-HD": {}, "YE-HJ": {}, "YE-HU": {}, + "YE-IB": {}, "YE-JA": {}, "YE-LA": {}, "YE-MA": {}, "YE-MR": {}, + "YE-MU": {}, "YE-MW": {}, "YE-RA": {}, "YE-SA": {}, "YE-SD": {}, "YE-SH": {}, + "YE-SN": {}, "YE-TA": {}, "ZA-EC": {}, "ZA-FS": {}, "ZA-GP": {}, + "ZA-LP": {}, "ZA-MP": {}, "ZA-NC": {}, "ZA-NW": {}, "ZA-WC": {}, + "ZA-ZN": {}, "ZA-KZN": {}, "ZM-01": {}, "ZM-02": {}, "ZM-03": {}, "ZM-04": {}, + "ZM-05": {}, "ZM-06": {}, "ZM-07": {}, "ZM-08": {}, "ZM-09": {}, "ZM-10": {}, + "ZW-BU": {}, "ZW-HA": {}, "ZW-MA": {}, "ZW-MC": {}, "ZW-ME": {}, + "ZW-MI": {}, "ZW-MN": {}, "ZW-MS": {}, "ZW-MV": {}, "ZW-MW": {}, +} diff --git 
a/vendor/github.com/go-playground/validator/v10/currency_codes.go b/vendor/github.com/go-playground/validator/v10/currency_codes.go new file mode 100644 index 0000000000..d0317f89cc --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/currency_codes.go @@ -0,0 +1,79 @@ +package validator + +var iso4217 = map[string]struct{}{ + "AFN": {}, "EUR": {}, "ALL": {}, "DZD": {}, "USD": {}, + "AOA": {}, "XCD": {}, "ARS": {}, "AMD": {}, "AWG": {}, + "AUD": {}, "AZN": {}, "BSD": {}, "BHD": {}, "BDT": {}, + "BBD": {}, "BYN": {}, "BZD": {}, "XOF": {}, "BMD": {}, + "INR": {}, "BTN": {}, "BOB": {}, "BOV": {}, "BAM": {}, + "BWP": {}, "NOK": {}, "BRL": {}, "BND": {}, "BGN": {}, + "BIF": {}, "CVE": {}, "KHR": {}, "XAF": {}, "CAD": {}, + "KYD": {}, "CLP": {}, "CLF": {}, "CNY": {}, "COP": {}, + "COU": {}, "KMF": {}, "CDF": {}, "NZD": {}, "CRC": {}, + "HRK": {}, "CUP": {}, "CUC": {}, "ANG": {}, "CZK": {}, + "DKK": {}, "DJF": {}, "DOP": {}, "EGP": {}, "SVC": {}, + "ERN": {}, "SZL": {}, "ETB": {}, "FKP": {}, "FJD": {}, + "XPF": {}, "GMD": {}, "GEL": {}, "GHS": {}, "GIP": {}, + "GTQ": {}, "GBP": {}, "GNF": {}, "GYD": {}, "HTG": {}, + "HNL": {}, "HKD": {}, "HUF": {}, "ISK": {}, "IDR": {}, + "XDR": {}, "IRR": {}, "IQD": {}, "ILS": {}, "JMD": {}, + "JPY": {}, "JOD": {}, "KZT": {}, "KES": {}, "KPW": {}, + "KRW": {}, "KWD": {}, "KGS": {}, "LAK": {}, "LBP": {}, + "LSL": {}, "ZAR": {}, "LRD": {}, "LYD": {}, "CHF": {}, + "MOP": {}, "MKD": {}, "MGA": {}, "MWK": {}, "MYR": {}, + "MVR": {}, "MRU": {}, "MUR": {}, "XUA": {}, "MXN": {}, + "MXV": {}, "MDL": {}, "MNT": {}, "MAD": {}, "MZN": {}, + "MMK": {}, "NAD": {}, "NPR": {}, "NIO": {}, "NGN": {}, + "OMR": {}, "PKR": {}, "PAB": {}, "PGK": {}, "PYG": {}, + "PEN": {}, "PHP": {}, "PLN": {}, "QAR": {}, "RON": {}, + "RUB": {}, "RWF": {}, "SHP": {}, "WST": {}, "STN": {}, + "SAR": {}, "RSD": {}, "SCR": {}, "SLL": {}, "SGD": {}, + "XSU": {}, "SBD": {}, "SOS": {}, "SSP": {}, "LKR": {}, + "SDG": {}, "SRD": {}, "SEK": {}, "CHE": {}, "CHW": {}, + 
"SYP": {}, "TWD": {}, "TJS": {}, "TZS": {}, "THB": {}, + "TOP": {}, "TTD": {}, "TND": {}, "TRY": {}, "TMT": {}, + "UGX": {}, "UAH": {}, "AED": {}, "USN": {}, "UYU": {}, + "UYI": {}, "UYW": {}, "UZS": {}, "VUV": {}, "VES": {}, + "VND": {}, "YER": {}, "ZMW": {}, "ZWL": {}, "XBA": {}, + "XBB": {}, "XBC": {}, "XBD": {}, "XTS": {}, "XXX": {}, + "XAU": {}, "XPD": {}, "XPT": {}, "XAG": {}, +} + +var iso4217_numeric = map[int]struct{}{ + 8: {}, 12: {}, 32: {}, 36: {}, 44: {}, + 48: {}, 50: {}, 51: {}, 52: {}, 60: {}, + 64: {}, 68: {}, 72: {}, 84: {}, 90: {}, + 96: {}, 104: {}, 108: {}, 116: {}, 124: {}, + 132: {}, 136: {}, 144: {}, 152: {}, 156: {}, + 170: {}, 174: {}, 188: {}, 191: {}, 192: {}, + 203: {}, 208: {}, 214: {}, 222: {}, 230: {}, + 232: {}, 238: {}, 242: {}, 262: {}, 270: {}, + 292: {}, 320: {}, 324: {}, 328: {}, 332: {}, + 340: {}, 344: {}, 348: {}, 352: {}, 356: {}, + 360: {}, 364: {}, 368: {}, 376: {}, 388: {}, + 392: {}, 398: {}, 400: {}, 404: {}, 408: {}, + 410: {}, 414: {}, 417: {}, 418: {}, 422: {}, + 426: {}, 430: {}, 434: {}, 446: {}, 454: {}, + 458: {}, 462: {}, 480: {}, 484: {}, 496: {}, + 498: {}, 504: {}, 512: {}, 516: {}, 524: {}, + 532: {}, 533: {}, 548: {}, 554: {}, 558: {}, + 566: {}, 578: {}, 586: {}, 590: {}, 598: {}, + 600: {}, 604: {}, 608: {}, 634: {}, 643: {}, + 646: {}, 654: {}, 682: {}, 690: {}, 694: {}, + 702: {}, 704: {}, 706: {}, 710: {}, 728: {}, + 748: {}, 752: {}, 756: {}, 760: {}, 764: {}, + 776: {}, 780: {}, 784: {}, 788: {}, 800: {}, + 807: {}, 818: {}, 826: {}, 834: {}, 840: {}, + 858: {}, 860: {}, 882: {}, 886: {}, 901: {}, + 927: {}, 928: {}, 929: {}, 930: {}, 931: {}, + 932: {}, 933: {}, 934: {}, 936: {}, 938: {}, + 940: {}, 941: {}, 943: {}, 944: {}, 946: {}, + 947: {}, 948: {}, 949: {}, 950: {}, 951: {}, + 952: {}, 953: {}, 955: {}, 956: {}, 957: {}, + 958: {}, 959: {}, 960: {}, 961: {}, 962: {}, + 963: {}, 964: {}, 965: {}, 967: {}, 968: {}, + 969: {}, 970: {}, 971: {}, 972: {}, 973: {}, + 975: {}, 976: {}, 977: {}, 978: 
{}, 979: {}, + 980: {}, 981: {}, 984: {}, 985: {}, 986: {}, + 990: {}, 994: {}, 997: {}, 999: {}, +} diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go new file mode 100644 index 0000000000..52918e4093 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/doc.go @@ -0,0 +1,1531 @@ +/* +Package validator implements value validations for structs and individual fields +based on tags. + +It can also handle Cross-Field and Cross-Struct validation for nested structs +and has the ability to dive into arrays and maps of any type. + +see more examples https://github.com/go-playground/validator/tree/master/_examples + +# Singleton + +Validator is designed to be thread-safe and used as a singleton instance. +It caches information about your struct and validations, +in essence only parsing your validation tags once per struct type. +Using multiple instances neglects the benefit of caching. +The not thread-safe functions are explicitly marked as such in the documentation. + +# Validation Functions Return Type error + +Doing things this way is actually the way the standard library does, see the +file.Open method here: + + https://golang.org/pkg/os/#Open. + +The authors return type "error" to avoid the issue discussed in the following, +where err is always != nil: + + http://stackoverflow.com/a/29138676/3158232 + https://github.com/go-playground/validator/issues/134 + +Validator only InvalidValidationError for bad validation input, nil or +ValidationErrors as type error; so, in your code all you need to do is check +if the error returned is not nil, and if it's not check if error is +InvalidValidationError ( if necessary, most of the time it isn't ) type cast +it to type ValidationErrors like so err.(validator.ValidationErrors). + +# Custom Validation Functions + +Custom Validation functions can be added. 
Example: + + // Structure + func customFunc(fl validator.FieldLevel) bool { + + if fl.Field().String() == "invalid" { + return false + } + + return true + } + + validate.RegisterValidation("custom tag name", customFunc) + // NOTES: using the same tag name as an existing function + // will overwrite the existing one + +# Cross-Field Validation + +Cross-Field Validation can be done via the following tags: + - eqfield + - nefield + - gtfield + - gtefield + - ltfield + - ltefield + - eqcsfield + - necsfield + - gtcsfield + - gtecsfield + - ltcsfield + - ltecsfield + +If, however, some custom cross-field validation is required, it can be done +using a custom validation. + +Why not just have cross-fields validation tags (i.e. only eqcsfield and not +eqfield)? + +The reason is efficiency. If you want to check a field within the same struct +"eqfield" only has to find the field on the same struct (1 level). But, if we +used "eqcsfield" it could be multiple levels down. Example: + + type Inner struct { + StartDate time.Time + } + + type Outer struct { + InnerStructField *Inner + CreatedAt time.Time `validate:"ltecsfield=InnerStructField.StartDate"` + } + + now := time.Now() + + inner := &Inner{ + StartDate: now, + } + + outer := &Outer{ + InnerStructField: inner, + CreatedAt: now, + } + + errs := validate.Struct(outer) + + // NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed + // into the function + // when calling validate.VarWithValue(val, field, tag) val will be + // whatever you pass, struct, field... + // when calling validate.Field(field, tag) val will be nil + +# Multiple Validators + +Multiple validators on a field will process in the order defined. Example: + + type Test struct { + Field `validate:"max=10,min=1"` + } + + // max will be checked then min + +Bad Validator definitions are not handled by the library. 
Example: + + type Test struct { + Field `validate:"min=10,max=0"` + } + + // this definition of min max will never succeed + +# Using Validator Tags + +Baked In Cross-Field validation only compares fields on the same struct. +If Cross-Field + Cross-Struct validation is needed you should implement your +own custom validator. + +Comma (",") is the default separator of validation tags. If you wish to +have a comma included within the parameter (i.e. excludesall=,) you will need to +use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma, +so the above will become excludesall=0x2C. + + type Test struct { + Field `validate:"excludesall=,"` // BAD! Do not include a comma. + Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation. + } + +Pipe ("|") is the 'or' validation tags deparator. If you wish to +have a pipe included within the parameter i.e. excludesall=| you will need to +use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe, +so the above will become excludesall=0x7C + + type Test struct { + Field `validate:"excludesall=|"` // BAD! Do not include a pipe! + Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation. + } + +# Baked In Validators and Tags + +Here is a list of the current built in validators: + +# Skip Field + +Tells the validation to skip this struct field; this is particularly +handy in ignoring embedded structs from being validated. (Usage: -) + + Usage: - + +# Or Operator + +This is the 'or' operator allowing multiple validators to be used and +accepted. (Usage: rgb|rgba) <-- this would allow either rgb or rgba +colors to be accepted. This can also be combined with 'and' for example +( Usage: omitempty,rgb|rgba) + + Usage: | + +# StructOnly + +When a field that is a nested struct is encountered, and contains this flag +any validation on the nested struct will be run, but none of the nested +struct fields will be validated. 
This is useful if inside of your program +you know the struct will be valid, but need to verify it has been assigned. +NOTE: only "required" and "omitempty" can be used on a struct itself. + + Usage: structonly + +# NoStructLevel + +Same as structonly tag except that any struct level validations will not run. + + Usage: nostructlevel + +# Omit Empty + +Allows conditional validation, for example, if a field is not set with +a value (Determined by the "required" validator) then other validation +such as min or max won't run, but if a value is set validation will run. + + Usage: omitempty + +# Omit Nil + +Allows to skip the validation if the value is nil (same as omitempty, but +only for the nil-values). + + Usage: omitnil + +# Dive + +This tells the validator to dive into a slice, array or map and validate that +level of the slice, array or map with the validation tags that follow. +Multidimensional nesting is also supported, each level you wish to dive will +require another dive tag. dive has some sub-tags, 'keys' & 'endkeys', please see +the Keys & EndKeys section just below. + + Usage: dive + +Example #1 + + [][]string with validation tag "gt=0,dive,len=1,dive,required" + // gt=0 will be applied to [] + // len=1 will be applied to []string + // required will be applied to string + +Example #2 + + [][]string with validation tag "gt=0,dive,dive,required" + // gt=0 will be applied to [] + // []string will be spared validation + // required will be applied to string + +Keys & EndKeys + +These are to be used together directly after the dive tag and tells the validator +that anything between 'keys' and 'endkeys' applies to the keys of a map and not the +values; think of it like the 'dive' tag, but for map keys instead of values. +Multidimensional nesting is also supported, each level you wish to validate will +require another 'keys' and 'endkeys' tag. These tags are only valid for maps. 
+ + Usage: dive,keys,othertagvalidation(s),endkeys,valuevalidationtags + +Example #1 + + map[string]string with validation tag "gt=0,dive,keys,eq=1|eq=2,endkeys,required" + // gt=0 will be applied to the map itself + // eq=1|eq=2 will be applied to the map keys + // required will be applied to map values + +Example #2 + + map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required" + // gt=0 will be applied to the map itself + // eq=1|eq=2 will be applied to each array element in the map keys + // required will be applied to map values + +# Required + +This validates that the value is not the data types default zero value. +For numbers ensures value is not zero. For strings ensures value is +not "". For booleans ensures value is not false. For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. For structs ensures value is not the zero value when using WithRequiredStructEnabled. + + Usage: required + +# Required If + +The field under validation must be present and not empty only if all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "". For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value. +Using the same field name multiple times in the parameters will result in a panic at runtime. + + Usage: required_if + +Examples: + + // require the field if the Field1 is equal to the parameter given: + Usage: required_if=Field1 foobar + + // require the field if the Field1 and Field2 is equal to the value respectively: + Usage: required_if=Field1 foo Field2 bar + +# Required Unless + +The field under validation must be present and not empty unless all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "". 
For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value. + + Usage: required_unless + +Examples: + + // require the field unless the Field1 is equal to the parameter given: + Usage: required_unless=Field1 foobar + + // require the field unless the Field1 and Field2 is equal to the value respectively: + Usage: required_unless=Field1 foo Field2 bar + +# Required With + +The field under validation must be present and not empty only if any +of the other specified fields are present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. For structs ensures value is not the zero value. + + Usage: required_with + +Examples: + + // require the field if the Field1 is present: + Usage: required_with=Field1 + + // require the field if the Field1 or Field2 is present: + Usage: required_with=Field1 Field2 + +# Required With All + +The field under validation must be present and not empty only if all +of the other specified fields are present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. For structs ensures value is not the zero value. + + Usage: required_with_all + +Example: + + // require the field if the Field1 and Field2 is present: + Usage: required_with_all=Field1 Field2 + +# Required Without + +The field under validation must be present and not empty only when any +of the other specified fields are not present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. For structs ensures value is not the zero value. 
+ + Usage: required_without + +Examples: + + // require the field if the Field1 is not present: + Usage: required_without=Field1 + + // require the field if the Field1 or Field2 is not present: + Usage: required_without=Field1 Field2 + +# Required Without All + +The field under validation must be present and not empty only when all +of the other specified fields are not present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. For structs ensures value is not the zero value. + + Usage: required_without_all + +Example: + + // require the field if the Field1 and Field2 is not present: + Usage: required_without_all=Field1 Field2 + +# Excluded If + +The field under validation must not be present or not empty only if all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "". For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value. + + Usage: excluded_if + +Examples: + + // exclude the field if the Field1 is equal to the parameter given: + Usage: excluded_if=Field1 foobar + + // exclude the field if the Field1 and Field2 is equal to the value respectively: + Usage: excluded_if=Field1 foo Field2 bar + +# Excluded Unless + +The field under validation must not be present or empty unless all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "". For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value. 
+ + Usage: excluded_unless + +Examples: + + // exclude the field unless the Field1 is equal to the parameter given: + Usage: excluded_unless=Field1 foobar + + // exclude the field unless the Field1 and Field2 is equal to the value respectively: + Usage: excluded_unless=Field1 foo Field2 bar + +# Is Default + +This validates that the value is the default value and is almost the +opposite of required. + + Usage: isdefault + +# Length + +For numbers, length will ensure that the value is +equal to the parameter given. For strings, it checks that +the string length is exactly that number of characters. For slices, +arrays, and maps, validates the number of items. + +Example #1 + + Usage: len=10 + +Example #2 (time.Duration) + +For time.Duration, len will ensure that the value is equal to the duration given +in the parameter. + + Usage: len=1h30m + +# Maximum + +For numbers, max will ensure that the value is +less than or equal to the parameter given. For strings, it checks +that the string length is at most that number of characters. For +slices, arrays, and maps, validates the number of items. + +Example #1 + + Usage: max=10 + +Example #2 (time.Duration) + +For time.Duration, max will ensure that the value is less than or equal to the +duration given in the parameter. + + Usage: max=1h30m + +# Minimum + +For numbers, min will ensure that the value is +greater or equal to the parameter given. For strings, it checks that +the string length is at least that number of characters. For slices, +arrays, and maps, validates the number of items. + +Example #1 + + Usage: min=10 + +Example #2 (time.Duration) + +For time.Duration, min will ensure that the value is greater than or equal to +the duration given in the parameter. + + Usage: min=1h30m + +# Equals + +For strings & numbers, eq will ensure that the value is +equal to the parameter given. For slices, arrays, and maps, +validates the number of items. 
+ +Example #1 + + Usage: eq=10 + +Example #2 (time.Duration) + +For time.Duration, eq will ensure that the value is equal to the duration given +in the parameter. + + Usage: eq=1h30m + +# Not Equal + +For strings & numbers, ne will ensure that the value is not +equal to the parameter given. For slices, arrays, and maps, +validates the number of items. + +Example #1 + + Usage: ne=10 + +Example #2 (time.Duration) + +For time.Duration, ne will ensure that the value is not equal to the duration +given in the parameter. + + Usage: ne=1h30m + +# One Of + +For strings, ints, and uints, oneof will ensure that the value +is one of the values in the parameter. The parameter should be +a list of values separated by whitespace. Values may be +strings or numbers. To match strings with spaces in them, include +the target string between single quotes. Kind of like an 'enum'. + + Usage: oneof=red green + oneof='red green' 'blue yellow' + oneof=5 7 9 + +# One Of Case Insensitive + +Works the same as oneof but is case insensitive and therefore only accepts strings. + + Usage: oneofci=red green + oneofci='red green' 'blue yellow' + +# Greater Than + +For numbers, this will ensure that the value is greater than the +parameter given. For strings, it checks that the string length +is greater than that number of characters. For slices, arrays +and maps it validates the number of items. + +Example #1 + + Usage: gt=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is greater than time.Now.UTC(). + + Usage: gt + +Example #3 (time.Duration) + +For time.Duration, gt will ensure that the value is greater than the duration +given in the parameter. + + Usage: gt=1h30m + +# Greater Than or Equal + +Same as 'min' above. Kept both to make terminology with 'len' easier. + +Example #1 + + Usage: gte=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is greater than or equal to time.Now.UTC(). 
+ + Usage: gte + +Example #3 (time.Duration) + +For time.Duration, gte will ensure that the value is greater than or equal to +the duration given in the parameter. + + Usage: gte=1h30m + +# Less Than + +For numbers, this will ensure that the value is less than the parameter given. +For strings, it checks that the string length is less than that number of +characters. For slices, arrays, and maps it validates the number of items. + +Example #1 + + Usage: lt=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is less than time.Now.UTC(). + + Usage: lt + +Example #3 (time.Duration) + +For time.Duration, lt will ensure that the value is less than the duration given +in the parameter. + + Usage: lt=1h30m + +# Less Than or Equal + +Same as 'max' above. Kept both to make terminology with 'len' easier. + +Example #1 + + Usage: lte=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is less than or equal to time.Now.UTC(). + + Usage: lte + +Example #3 (time.Duration) + +For time.Duration, lte will ensure that the value is less than or equal to the +duration given in the parameter. + + Usage: lte=1h30m + +# Field Equals Another Field + +This will validate the field value against another fields value either within +a struct or passed in field. + +Example #1: + + // Validation on Password field using: + Usage: eqfield=ConfirmPassword + +Example #2: + + // Validating by field: + validate.VarWithValue(password, confirmpassword, "eqfield") + +Field Equals Another Field (relative) + +This does the same as eqfield except that it validates the field provided relative +to the top level struct. + + Usage: eqcsfield=InnerStructField.Field) + +# Field Does Not Equal Another Field + +This will validate the field value against another fields value either within +a struct or passed in field. 
+ +Examples: + + // Confirm two colors are not the same: + // + // Validation on Color field: + Usage: nefield=Color2 + + // Validating by field: + validate.VarWithValue(color1, color2, "nefield") + +Field Does Not Equal Another Field (relative) + +This does the same as nefield except that it validates the field provided +relative to the top level struct. + + Usage: necsfield=InnerStructField.Field + +# Field Greater Than Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(gtfield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "gtfield") + +# Field Greater Than Another Relative Field + +This does the same as gtfield except that it validates the field provided +relative to the top level struct. + + Usage: gtcsfield=InnerStructField.Field + +# Field Greater Than or Equal To Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(gtefield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "gtefield") + +# Field Greater Than or Equal To Another Relative Field + +This does the same as gtefield except that it validates the field provided relative +to the top level struct. + + Usage: gtecsfield=InnerStructField.Field + +# Less Than Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. 
usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(ltfield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "ltfield") + +# Less Than Another Relative Field + +This does the same as ltfield except that it validates the field provided relative +to the top level struct. + + Usage: ltcsfield=InnerStructField.Field + +# Less Than or Equal To Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(ltefield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "ltefield") + +# Less Than or Equal To Another Relative Field + +This does the same as ltefield except that it validates the field provided relative +to the top level struct. + + Usage: ltecsfield=InnerStructField.Field + +# Field Contains Another Field + +This does the same as contains except for struct fields. It should only be used +with string types. See the behavior of reflect.Value.String() for behavior on +other types. + + Usage: containsfield=InnerStructField.Field + +# Field Excludes Another Field + +This does the same as excludes except for struct fields. It should only be used +with string types. See the behavior of reflect.Value.String() for behavior on +other types. + + Usage: excludesfield=InnerStructField.Field + +# Unique + +For arrays & slices, unique will ensure that there are no duplicates. +For maps, unique will ensure that there are no duplicate values. +For slices of struct, unique will ensure that there are no duplicate values +in a field of the struct specified via a parameter. 
+ + // For arrays, slices, and maps: + Usage: unique + + // For slices of struct: + Usage: unique=field + +# ValidateFn + +This validates that an object responds to a method that can return error or bool. +By default it expects an interface `Validate() error` and check that the method +does not return an error. Other methods can be specified using two signatures: +If the method returns an error, it check if the return value is nil. +If the method returns a boolean, it checks if the value is true. + + // to use the default method Validate() error + Usage: validateFn + + // to use the custom method IsValid() bool (or error) + Usage: validateFn=IsValid + +# Alpha Only + +This validates that a string value contains ASCII alpha characters only + + Usage: alpha + +# Alpha Space + +This validates that a string value contains ASCII alpha characters and spaces only + + Usage: alphaspace + +# Alphanumeric + +This validates that a string value contains ASCII alphanumeric characters only + + Usage: alphanum + +# Alpha Unicode + +This validates that a string value contains unicode alpha characters only + + Usage: alphaunicode + +# Alphanumeric Unicode + +This validates that a string value contains unicode alphanumeric characters only + + Usage: alphanumunicode + +# Boolean + +This validates that a string value can successfully be parsed into a boolean with strconv.ParseBool + + Usage: boolean + +# Number + +This validates that a string value contains number values only. +For integers or float it returns true. + + Usage: number + +# Numeric + +This validates that a string value contains a basic numeric value. +basic excludes exponents etc... +for integers or float it returns true. + + Usage: numeric + +# Hexadecimal String + +This validates that a string value contains a valid hexadecimal. 
+ + Usage: hexadecimal + +# Hexcolor String + +This validates that a string value contains a valid hex color including +hashtag (#) + + Usage: hexcolor + +# Lowercase String + +This validates that a string value contains only lowercase characters. An empty string is not a valid lowercase string. + + Usage: lowercase + +# Uppercase String + +This validates that a string value contains only uppercase characters. An empty string is not a valid uppercase string. + + Usage: uppercase + +# RGB String + +This validates that a string value contains a valid rgb color + + Usage: rgb + +# RGBA String + +This validates that a string value contains a valid rgba color + + Usage: rgba + +# HSL String + +This validates that a string value contains a valid hsl color + + Usage: hsl + +# HSLA String + +This validates that a string value contains a valid hsla color + + Usage: hsla + +# E.164 Phone Number String + +This validates that a string value contains a valid E.164 Phone number +https://en.wikipedia.org/wiki/E.164 (ex. +1123456789) + + Usage: e164 + +# E-mail String + +This validates that a string value contains a valid email +This may not conform to all possibilities of any rfc standard, but neither +does any email provider accept all possibilities. + + Usage: email + +# JSON String + +This validates that a string value is valid JSON + + Usage: json + +# JWT String + +This validates that a string value is a valid JWT + + Usage: jwt + +# File + +This validates that a string value contains a valid file path and that +the file exists on the machine. +This is done using os.Stat, which is a platform independent function. + + Usage: file + +# Image path + +This validates that a string value contains a valid file path and that +the file exists on the machine and is an image. 
+This is done using os.Stat and github.com/gabriel-vasile/mimetype + + Usage: image + +# File Path + +This validates that a string value contains a valid file path but does not +validate the existence of that file. +This is done using os.Stat, which is a platform independent function. + + Usage: filepath + +# URL String + +This validates that a string value contains a valid url +This will accept any url the golang request uri accepts but must contain +a schema for example http:// or rtmp:// + + Usage: url + +# URI String + +This validates that a string value contains a valid uri +This will accept any uri the golang request uri accepts + + Usage: uri + +# Urn RFC 2141 String + +This validates that a string value contains a valid URN +according to the RFC 2141 spec. + + Usage: urn_rfc2141 + +# Base32 String + +This validates that a string value contains a valid bas324 value. +Although an empty string is valid base32 this will report an empty string +as an error, if you wish to accept an empty string as valid you can use +this with the omitempty tag. + + Usage: base32 + +# Base64 String + +This validates that a string value contains a valid base64 value. +Although an empty string is valid base64 this will report an empty string +as an error, if you wish to accept an empty string as valid you can use +this with the omitempty tag. + + Usage: base64 + +# Base64URL String + +This validates that a string value contains a valid base64 URL safe value +according the RFC4648 spec. +Although an empty string is a valid base64 URL safe value, this will report +an empty string as an error, if you wish to accept an empty string as valid +you can use this with the omitempty tag. + + Usage: base64url + +# Base64RawURL String + +This validates that a string value contains a valid base64 URL safe value, +but without = padding, according the RFC4648 spec, section 3.2. 
+Although an empty string is a valid base64 URL safe value, this will report +an empty string as an error, if you wish to accept an empty string as valid +you can use this with the omitempty tag. + + Usage: base64rawurl + +# Bitcoin Address + +This validates that a string value contains a valid bitcoin address. +The format of the string is checked to ensure it matches one of the three formats +P2PKH, P2SH and performs checksum validation. + + Usage: btc_addr + +Bitcoin Bech32 Address (segwit) + +This validates that a string value contains a valid bitcoin Bech32 address as defined +by bip-0173 (https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki) +Special thanks to Pieter Wuille for providing reference implementations. + + Usage: btc_addr_bech32 + +# Ethereum Address + +This validates that a string value contains a valid ethereum address. +The format of the string is checked to ensure it matches the standard Ethereum address format. + + Usage: eth_addr + +# Contains + +This validates that a string value contains the substring value. + + Usage: contains=@ + +# Contains Any + +This validates that a string value contains any Unicode code points +in the substring value. + + Usage: containsany=!@#? + +# Contains Rune + +This validates that a string value contains the supplied rune value. + + Usage: containsrune=@ + +# Excludes + +This validates that a string value does not contain the substring value. + + Usage: excludes=@ + +# Excludes All + +This validates that a string value does not contain any Unicode code +points in the substring value. + + Usage: excludesall=!@#? + +# Excludes Rune + +This validates that a string value does not contain the supplied rune value. 
+ + Usage: excludesrune=@ + +# Starts With + +This validates that a string value starts with the supplied string value + + Usage: startswith=hello + +# Ends With + +This validates that a string value ends with the supplied string value + + Usage: endswith=goodbye + +# Does Not Start With + +This validates that a string value does not start with the supplied string value + + Usage: startsnotwith=hello + +# Does Not End With + +This validates that a string value does not end with the supplied string value + + Usage: endsnotwith=goodbye + +# International Standard Book Number + +This validates that a string value contains a valid isbn10 or isbn13 value. + + Usage: isbn + +# International Standard Book Number 10 + +This validates that a string value contains a valid isbn10 value. + + Usage: isbn10 + +# International Standard Book Number 13 + +This validates that a string value contains a valid isbn13 value. + + Usage: isbn13 + +# Universally Unique Identifier UUID + +This validates that a string value contains a valid UUID. Uppercase UUID values will not pass - use `uuid_rfc4122` instead. + + Usage: uuid + +# Universally Unique Identifier UUID v3 + +This validates that a string value contains a valid version 3 UUID. Uppercase UUID values will not pass - use `uuid3_rfc4122` instead. + + Usage: uuid3 + +# Universally Unique Identifier UUID v4 + +This validates that a string value contains a valid version 4 UUID. Uppercase UUID values will not pass - use `uuid4_rfc4122` instead. + + Usage: uuid4 + +# Universally Unique Identifier UUID v5 + +This validates that a string value contains a valid version 5 UUID. Uppercase UUID values will not pass - use `uuid5_rfc4122` instead. + + Usage: uuid5 + +# Universally Unique Lexicographically Sortable Identifier ULID + +This validates that a string value contains a valid ULID value. + + Usage: ulid + +# ASCII + +This validates that a string value contains only ASCII characters. +NOTE: if the string is blank, this validates as true. 
+ + Usage: ascii + +# Printable ASCII + +This validates that a string value contains only printable ASCII characters. +NOTE: if the string is blank, this validates as true. + + Usage: printascii + +# Multi-Byte Characters + +This validates that a string value contains one or more multibyte characters. +NOTE: if the string is blank, this validates as true. + + Usage: multibyte + +# Data URL + +This validates that a string value contains a valid DataURI. +NOTE: this will also validate that the data portion is valid base64 + + Usage: datauri + +# Latitude + +This validates that a string value contains a valid latitude. + + Usage: latitude + +# Longitude + +This validates that a string value contains a valid longitude. + + Usage: longitude + +# Employeer Identification Number EIN + +This validates that a string value contains a valid U.S. Employer Identification Number. + + Usage: ein + +# Social Security Number SSN + +This validates that a string value contains a valid U.S. Social Security Number. + + Usage: ssn + +# Internet Protocol Address IP + +This validates that a string value contains a valid IP Address. + + Usage: ip + +# Internet Protocol Address IPv4 + +This validates that a string value contains a valid v4 IP Address. + + Usage: ipv4 + +# Internet Protocol Address IPv6 + +This validates that a string value contains a valid v6 IP Address. + + Usage: ipv6 + +# Classless Inter-Domain Routing CIDR + +This validates that a string value contains a valid CIDR Address. + + Usage: cidr + +# Classless Inter-Domain Routing CIDRv4 + +This validates that a string value contains a valid v4 CIDR Address. + + Usage: cidrv4 + +# Classless Inter-Domain Routing CIDRv6 + +This validates that a string value contains a valid v6 CIDR Address. + + Usage: cidrv6 + +# Transmission Control Protocol Address TCP + +This validates that a string value contains a valid resolvable TCP Address. 
+ + Usage: tcp_addr + +# Transmission Control Protocol Address TCPv4 + +This validates that a string value contains a valid resolvable v4 TCP Address. + + Usage: tcp4_addr + +# Transmission Control Protocol Address TCPv6 + +This validates that a string value contains a valid resolvable v6 TCP Address. + + Usage: tcp6_addr + +# User Datagram Protocol Address UDP + +This validates that a string value contains a valid resolvable UDP Address. + + Usage: udp_addr + +# User Datagram Protocol Address UDPv4 + +This validates that a string value contains a valid resolvable v4 UDP Address. + + Usage: udp4_addr + +# User Datagram Protocol Address UDPv6 + +This validates that a string value contains a valid resolvable v6 UDP Address. + + Usage: udp6_addr + +# Internet Protocol Address IP + +This validates that a string value contains a valid resolvable IP Address. + + Usage: ip_addr + +# Internet Protocol Address IPv4 + +This validates that a string value contains a valid resolvable v4 IP Address. + + Usage: ip4_addr + +# Internet Protocol Address IPv6 + +This validates that a string value contains a valid resolvable v6 IP Address. + + Usage: ip6_addr + +# Unix domain socket end point Address + +This validates that a string value contains a valid Unix Address. + + Usage: unix_addr + +# Media Access Control Address MAC + +This validates that a string value contains a valid MAC Address. + + Usage: mac + +Note: See Go's ParseMAC for accepted formats and types: + + http://golang.org/src/net/mac.go?s=866:918#L29 + +# Hostname RFC 952 + +This validates that a string value is a valid Hostname according to RFC 952 https://tools.ietf.org/html/rfc952 + + Usage: hostname + +# Hostname RFC 1123 + +This validates that a string value is a valid Hostname according to RFC 1123 https://tools.ietf.org/html/rfc1123 + + Usage: hostname_rfc1123 or if you want to continue to use 'hostname' in your tags, create an alias. 
+ +Full Qualified Domain Name (FQDN) + +This validates that a string value contains a valid FQDN. + + Usage: fqdn + +# HTML Tags + +This validates that a string value appears to be an HTML element tag +including those described at https://developer.mozilla.org/en-US/docs/Web/HTML/Element + + Usage: html + +# HTML Encoded + +This validates that a string value is a proper character reference in decimal +or hexadecimal format + + Usage: html_encoded + +# URL Encoded + +This validates that a string value is percent-encoded (URL encoded) according +to https://tools.ietf.org/html/rfc3986#section-2.1 + + Usage: url_encoded + +# Directory + +This validates that a string value contains a valid directory and that +it exists on the machine. +This is done using os.Stat, which is a platform independent function. + + Usage: dir + +# Directory Path + +This validates that a string value contains a valid directory but does +not validate the existence of that directory. +This is done using os.Stat, which is a platform independent function. +It is safest to suffix the string with os.PathSeparator if the directory +may not exist at the time of validation. + + Usage: dirpath + +# HostPort + +This validates that a string value contains a valid DNS hostname and port that +can be used to validate fields typically passed to sockets and connections. + + Usage: hostname_port + +# Port + +This validates that the value falls within the valid port number range of 1 to 65,535. + + Usage: port + +# Datetime + +This validates that a string value is a valid datetime based on the supplied datetime format. +Supplied format must match the official Go time format layout as documented in https://golang.org/pkg/time/ + + Usage: datetime=2006-01-02 + +# Iso3166-1 alpha-2 + +This validates that a string value is a valid country code based on iso3166-1 alpha-2 standard. 
+see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha2 + +# Iso3166-1 alpha-3 + +This validates that a string value is a valid country code based on iso3166-1 alpha-3 standard. +see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha3 + +# Iso3166-1 alpha-numeric + +This validates that a string value is a valid country code based on iso3166-1 alpha-numeric standard. +see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha3 + +# BCP 47 Language Tag + +This validates that a string value is a valid BCP 47 language tag, as parsed by language.Parse. +More information on https://pkg.go.dev/golang.org/x/text/language + + Usage: bcp47_language_tag + +BIC (SWIFT code) + +This validates that a string value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362. +More information on https://www.iso.org/standard/60390.html + + Usage: bic + +# RFC 1035 label + +This validates that a string value is a valid dns RFC 1035 label, defined in RFC 1035. +More information on https://datatracker.ietf.org/doc/html/rfc1035 + + Usage: dns_rfc1035_label + +# TimeZone + +This validates that a string value is a valid time zone based on the time zone database present on the system. +Although empty value and Local value are allowed by time.LoadLocation golang function, they are not allowed by this validator. +More information on https://golang.org/pkg/time/#LoadLocation + + Usage: timezone + +# Semantic Version + +This validates that a string value is a valid semver version, defined in Semantic Versioning 2.0.0. +More information on https://semver.org/ + + Usage: semver + +# CVE Identifier + +This validates that a string value is a valid cve id, defined in cve mitre. +More information on https://cve.mitre.org/ + + Usage: cve + +# Credit Card + +This validates that a string value contains a valid credit card number using Luhn algorithm. 
+ + Usage: credit_card + +# Luhn Checksum + + Usage: luhn_checksum + +This validates that a string or (u)int value contains a valid checksum using the Luhn algorithm. + +# MongoDB + +This validates that a string is a valid 24 character hexadecimal string or valid connection string. + + Usage: mongodb + mongodb_connection_string + +Example: + + type Test struct { + ObjectIdField string `validate:"mongodb"` + ConnectionStringField string `validate:"mongodb_connection_string"` + } + +# Cron + +This validates that a string value contains a valid cron expression. + + Usage: cron + +# SpiceDb ObjectID/Permission/Object Type + +This validates that a string is valid for use with SpiceDb for the indicated purpose. If no purpose is given, a purpose of 'id' is assumed. + + Usage: spicedb=id|permission|type + +# Alias Validators and Tags + +Alias Validators and Tags +NOTE: When returning an error, the tag returned in "FieldError" will be +the alias tag unless the dive tag is part of the alias. Everything after the +dive tag is not reported as the alias tag. Also, the "ActualTag" in the before +case will be the actual tag within the alias that failed. + +Here is a list of the current built in alias tags: + + "iscolor" + alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor) + "country_code" + alias is "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric" (Usage: country_code) + +Validator notes: + + regex + a regex validator won't be added because commas and = signs can be part + of a regex which conflict with the validation definitions. Although + workarounds can be made, they take away from using pure regex's. + Furthermore it's quick and dirty but the regex's become harder to + maintain and are not reusable, so it's as much a programming philosophy + as anything. + + In place of this new validator functions should be created; a regex can + be used within the validator function and even be precompiled for better + efficiency within regexes.go. 
+ + And the best reason, you can submit a pull request and we can keep on + adding to the validation library of this package! + +# Non standard validators + +A collection of validation rules that are frequently needed but are more +complex than the ones found in the baked in validators. +A non standard validator must be registered manually like you would +with your own custom validation functions. + +Example of registration and use: + + type Test struct { + TestField string `validate:"yourtag"` + } + + t := &Test{ + TestField: "Test" + } + + validate := validator.New() + validate.RegisterValidation("yourtag", validators.NotBlank) + +Here is a list of the current non standard validators: + + NotBlank + This validates that the value is not blank or with length zero. + For strings ensures they do not contain only spaces. For channels, maps, slices and arrays + ensures they don't have zero length. For others, a non empty value is required. + + Usage: notblank + +# Panics + +This package panics when bad input is provided, this is by design, bad code like +that should not make it to production. 
+ + type Test struct { + TestField string `validate:"nonexistantfunction=1"` + } + + t := &Test{ + TestField: "Test" + } + + validate.Struct(t) // this will panic +*/ +package validator diff --git a/vendor/github.com/go-playground/validator/v10/errors.go b/vendor/github.com/go-playground/validator/v10/errors.go new file mode 100644 index 0000000000..fd90625672 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/errors.go @@ -0,0 +1,273 @@ +package validator + +import ( + "bytes" + "fmt" + "reflect" + "strings" + + ut "github.com/go-playground/universal-translator" +) + +const ( + fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag" +) + +// ValidationErrorsTranslations is the translation return type +type ValidationErrorsTranslations map[string]string + +// InvalidValidationError describes an invalid argument passed to +// `Struct`, `StructExcept`, StructPartial` or `Field` +type InvalidValidationError struct { + Type reflect.Type +} + +// Error returns InvalidValidationError message +func (e *InvalidValidationError) Error() string { + if e.Type == nil { + return "validator: (nil)" + } + + return "validator: (nil " + e.Type.String() + ")" +} + +// ValidationErrors is an array of FieldError's +// for use in custom error messages post validation. +type ValidationErrors []FieldError + +// Error is intended for use in development + debugging and not intended to be a production error message. +// It allows ValidationErrors to subscribe to the Error interface. 
+// All information to create an error message specific to your application is contained within +// the FieldError found within the ValidationErrors array +func (ve ValidationErrors) Error() string { + buff := bytes.NewBufferString("") + + for i := 0; i < len(ve); i++ { + buff.WriteString(ve[i].Error()) + buff.WriteString("\n") + } + + return strings.TrimSpace(buff.String()) +} + +// Translate translates all of the ValidationErrors +func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslations { + trans := make(ValidationErrorsTranslations) + + var fe *fieldError + + for i := 0; i < len(ve); i++ { + fe = ve[i].(*fieldError) + + // // in case an Anonymous struct was used, ensure that the key + // // would be 'Username' instead of ".Username" + // if len(fe.ns) > 0 && fe.ns[:1] == "." { + // trans[fe.ns[1:]] = fe.Translate(ut) + // continue + // } + + trans[fe.ns] = fe.Translate(ut) + } + + return trans +} + +// FieldError contains all functions to get error details +type FieldError interface { + + // Tag returns the validation tag that failed. if the + // validation was an alias, this will return the + // alias name and not the underlying tag that failed. + // + // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla" + // will return "iscolor" + Tag() string + + // ActualTag returns the validation tag that failed, even if an + // alias the actual tag within the alias will be returned. + // If an 'or' validation fails the entire or will be returned. + // + // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla" + // will return "hexcolor|rgb|rgba|hsl|hsla" + ActualTag() string + + // Namespace returns the namespace for the field error, with the tag + // name taking precedence over the field's actual name. + // + // eg. JSON name "User.fname" + // + // See StructNamespace() for a version that returns actual names. + // + // NOTE: this field can be blank when validating a single primitive field + // using validate.Field(...) 
as there is no way to extract it's name + Namespace() string + + // StructNamespace returns the namespace for the field error, with the field's + // actual name. + // + // eg. "User.FirstName" see Namespace for comparison + // + // NOTE: this field can be blank when validating a single primitive field + // using validate.Field(...) as there is no way to extract its name + StructNamespace() string + + // Field returns the field's name with the tag name taking precedence over the + // field's actual name. + // + // `RegisterTagNameFunc` must be registered to get tag value. + // + // eg. JSON name "fname" + // see StructField for comparison + Field() string + + // StructField returns the field's actual name from the struct, when able to determine. + // + // eg. "FirstName" + // see Field for comparison + StructField() string + + // Value returns the actual field's value in case needed for creating the error + // message + Value() interface{} + + // Param returns the param value, in string form for comparison; this will also + // help with generating an error message + Param() string + + // Kind returns the Field's reflect Kind + // + // eg. time.Time's kind is a struct + Kind() reflect.Kind + + // Type returns the Field's reflect Type + // + // eg. 
time.Time's type is time.Time + Type() reflect.Type + + // Translate returns the FieldError's translated error + // from the provided 'ut.Translator' and registered 'TranslationFunc' + // + // NOTE: if no registered translator can be found it returns the same as + // calling fe.Error() + Translate(ut ut.Translator) string + + // Error returns the FieldError's message + Error() string +} + +// compile time interface checks +var _ FieldError = new(fieldError) +var _ error = new(fieldError) + +// fieldError contains a single field's validation error along +// with other properties that may be needed for error message creation +// it complies with the FieldError interface +type fieldError struct { + v *Validate + tag string + actualTag string + ns string + structNs string + fieldLen uint8 + structfieldLen uint8 + value interface{} + param string + kind reflect.Kind + typ reflect.Type +} + +// Tag returns the validation tag that failed. +func (fe *fieldError) Tag() string { + return fe.tag +} + +// ActualTag returns the validation tag that failed, even if an +// alias the actual tag within the alias will be returned. +func (fe *fieldError) ActualTag() string { + return fe.actualTag +} + +// Namespace returns the namespace for the field error, with the tag +// name taking precedence over the field's actual name. +func (fe *fieldError) Namespace() string { + return fe.ns +} + +// StructNamespace returns the namespace for the field error, with the field's +// actual name. +func (fe *fieldError) StructNamespace() string { + return fe.structNs +} + +// Field returns the field's name with the tag name taking precedence over the +// field's actual name. +func (fe *fieldError) Field() string { + return fe.ns[len(fe.ns)-int(fe.fieldLen):] + // // return fe.field + // fld := fe.ns[len(fe.ns)-int(fe.fieldLen):] + + // log.Println("FLD:", fld) + + // if len(fld) > 0 && fld[:1] == "." 
{ + // return fld[1:] + // } + + // return fld +} + +// StructField returns the field's actual name from the struct, when able to determine. +func (fe *fieldError) StructField() string { + // return fe.structField + return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):] +} + +// Value returns the actual field's value in case needed for creating the error +// message +func (fe *fieldError) Value() interface{} { + return fe.value +} + +// Param returns the param value, in string form for comparison; this will +// also help with generating an error message +func (fe *fieldError) Param() string { + return fe.param +} + +// Kind returns the Field's reflect Kind +func (fe *fieldError) Kind() reflect.Kind { + return fe.kind +} + +// Type returns the Field's reflect Type +func (fe *fieldError) Type() reflect.Type { + return fe.typ +} + +// Error returns the fieldError's error message +func (fe *fieldError) Error() string { + return fmt.Sprintf(fieldErrMsg, fe.ns, fe.Field(), fe.tag) +} + +// Translate returns the FieldError's translated error +// from the provided 'ut.Translator' and registered 'TranslationFunc' +// +// NOTE: if no registered translation can be found, it returns the original +// untranslated error message. 
+func (fe *fieldError) Translate(ut ut.Translator) string { + var fn TranslationFunc + + m, ok := fe.v.transTagFunc[ut] + if !ok { + return fe.Error() + } + + fn, ok = m[fe.tag] + if !ok { + fn, ok = m[fe.actualTag] + if !ok { + return fe.Error() + } + } + + return fn(ut, fe) +} diff --git a/vendor/github.com/go-playground/validator/v10/field_level.go b/vendor/github.com/go-playground/validator/v10/field_level.go new file mode 100644 index 0000000000..ef35826ee6 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/field_level.go @@ -0,0 +1,120 @@ +package validator + +import "reflect" + +// FieldLevel contains all the information and helper functions +// to validate a field +type FieldLevel interface { + + // Top returns the top level struct, if any + Top() reflect.Value + + // Parent returns the current fields parent struct, if any or + // the comparison value if called 'VarWithValue' + Parent() reflect.Value + + // Field returns current field for validation + Field() reflect.Value + + // FieldName returns the field's name with the tag + // name taking precedence over the fields actual name. + FieldName() string + + // StructFieldName returns the struct field's name + StructFieldName() string + + // Param returns param for validation against current field + Param() string + + // GetTag returns the current validations tag name + GetTag() string + + // ExtractType gets the actual underlying type of field value. + // It will dive into pointers, customTypes and return you the + // underlying value and it's kind. + ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool) + + // GetStructFieldOK traverses the parent struct to retrieve a specific field denoted by the provided namespace + // in the param and returns the field, field kind and whether is was successful in retrieving + // the field at all. 
+ // + // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field + // could not be retrieved because it didn't exist. + // + // Deprecated: Use GetStructFieldOK2() instead which also return if the value is nullable. + GetStructFieldOK() (reflect.Value, reflect.Kind, bool) + + // GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for + // the field and namespace allowing more extensibility for validators. + // + // Deprecated: Use GetStructFieldOKAdvanced2() instead which also return if the value is nullable. + GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) + + // GetStructFieldOK2 traverses the parent struct to retrieve a specific field denoted by the provided namespace + // in the param and returns the field, field kind, if it's a nullable type and whether is was successful in retrieving + // the field at all. + // + // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field + // could not be retrieved because it didn't exist. + GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) + + // GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for + // the field and namespace allowing more extensibility for validators. + GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) +} + +var _ FieldLevel = new(validate) + +// Field returns current field for validation +func (v *validate) Field() reflect.Value { + return v.flField +} + +// FieldName returns the field's name with the tag +// name taking precedence over the fields actual name. 
+func (v *validate) FieldName() string { + return v.cf.altName +} + +// GetTag returns the current validations tag name +func (v *validate) GetTag() string { + return v.ct.tag +} + +// StructFieldName returns the struct field's name +func (v *validate) StructFieldName() string { + return v.cf.name +} + +// Param returns param for validation against current field +func (v *validate) Param() string { + return v.ct.param +} + +// GetStructFieldOK returns Param returns param for validation against current field +// +// Deprecated: Use GetStructFieldOK2() instead which also return if the value is nullable. +func (v *validate) GetStructFieldOK() (reflect.Value, reflect.Kind, bool) { + current, kind, _, found := v.getStructFieldOKInternal(v.slflParent, v.ct.param) + return current, kind, found +} + +// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for +// the field and namespace allowing more extensibility for validators. +// +// Deprecated: Use GetStructFieldOKAdvanced2() instead which also return if the value is nullable. +func (v *validate) GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) { + current, kind, _, found := v.GetStructFieldOKAdvanced2(val, namespace) + return current, kind, found +} + +// GetStructFieldOK2 returns Param returns param for validation against current field +func (v *validate) GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) { + return v.getStructFieldOKInternal(v.slflParent, v.ct.param) +} + +// GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for +// the field and namespace allowing more extensibility for validators. 
+func (v *validate) GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) { + return v.getStructFieldOKInternal(val, namespace) +} diff --git a/vendor/github.com/go-playground/validator/v10/logo.png b/vendor/github.com/go-playground/validator/v10/logo.png new file mode 100644 index 0000000000..355000f524 Binary files /dev/null and b/vendor/github.com/go-playground/validator/v10/logo.png differ diff --git a/vendor/github.com/go-playground/validator/v10/options.go b/vendor/github.com/go-playground/validator/v10/options.go new file mode 100644 index 0000000000..86a0db218e --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/options.go @@ -0,0 +1,26 @@ +package validator + +// Option represents a configurations option to be applied to validator during initialization. +type Option func(*Validate) + +// WithRequiredStructEnabled enables required tag on non-pointer structs to be applied instead of ignored. +// +// This was made opt-in behaviour in order to maintain backward compatibility with the behaviour previous +// to being able to apply struct level validations on struct fields directly. +// +// It is recommended you enabled this as it will be the default behaviour in v11+ +func WithRequiredStructEnabled() Option { + return func(v *Validate) { + v.requiredStructEnabled = true + } +} + +// WithPrivateFieldValidation activates validation for unexported fields via the use of the `unsafe` package. +// +// By opting into this feature you are acknowledging that you are aware of the risks and accept any current or future +// consequences of using this feature. 
+func WithPrivateFieldValidation() Option { + return func(v *Validate) { + v.privateFieldValidation = true + } +} diff --git a/vendor/github.com/go-playground/validator/v10/postcode_regexes.go b/vendor/github.com/go-playground/validator/v10/postcode_regexes.go new file mode 100644 index 0000000000..326b8f7538 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/postcode_regexes.go @@ -0,0 +1,179 @@ +package validator + +import ( + "regexp" + "sync" +) + +var postCodePatternDict = map[string]string{ + "GB": `^GIR[ ]?0AA|((AB|AL|B|BA|BB|BD|BH|BL|BN|BR|BS|BT|CA|CB|CF|CH|CM|CO|CR|CT|CV|CW|DA|DD|DE|DG|DH|DL|DN|DT|DY|E|EC|EH|EN|EX|FK|FY|G|GL|GY|GU|HA|HD|HG|HP|HR|HS|HU|HX|IG|IM|IP|IV|JE|KA|KT|KW|KY|L|LA|LD|LE|LL|LN|LS|LU|M|ME|MK|ML|N|NE|NG|NN|NP|NR|NW|OL|OX|PA|PE|PH|PL|PO|PR|RG|RH|RM|S|SA|SE|SG|SK|SL|SM|SN|SO|SP|SR|SS|ST|SW|SY|TA|TD|TF|TN|TQ|TR|TS|TW|UB|W|WA|WC|WD|WF|WN|WR|WS|WV|YO|ZE)(\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}))|BFPO[ ]?\d{1,4}$`, + "JE": `^JE\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`, + "GG": `^GY\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`, + "IM": `^IM\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`, + "US": `^\d{5}([ \-]\d{4})?$`, + "CA": `^[ABCEGHJKLMNPRSTVXY]\d[ABCEGHJ-NPRSTV-Z][ ]?\d[ABCEGHJ-NPRSTV-Z]\d$`, + "DE": `^\d{5}$`, + "JP": `^\d{3}-\d{4}$`, + "FR": `^\d{2}[ ]?\d{3}$`, + "AU": `^\d{4}$`, + "IT": `^\d{5}$`, + "CH": `^\d{4}$`, + "AT": `^\d{4}$`, + "ES": `^\d{5}$`, + "NL": `^\d{4}[ ]?[A-Z]{2}$`, + "BE": `^\d{4}$`, + "DK": `^\d{4}$`, + "SE": `^\d{3}[ ]?\d{2}$`, + "NO": `^\d{4}$`, + "BR": `^\d{5}[\-]?\d{3}$`, + "PT": `^\d{4}([\-]\d{3})?$`, + "FI": `^\d{5}$`, + "AX": `^22\d{3}$`, + "KR": `^\d{3}[\-]\d{3}$`, + "CN": `^\d{6}$`, + "TW": `^\d{3}(\d{2})?$`, + "SG": `^\d{6}$`, + "DZ": `^\d{5}$`, + "AD": `^AD\d{3}$`, + "AR": `^([A-HJ-NP-Z])?\d{4}([A-Z]{3})?$`, + "AM": `^(37)?\d{4}$`, + "AZ": `^\d{4}$`, + "BH": `^((1[0-2]|[2-9])\d{2})?$`, + "BD": `^\d{4}$`, + "BB": `^(BB\d{5})?$`, + "BY": `^\d{6}$`, + "BM": `^[A-Z]{2}[ ]?[A-Z0-9]{2}$`, + "BA": `^\d{5}$`, + "IO": `^BBND 
1ZZ$`, + "BN": `^[A-Z]{2}[ ]?\d{4}$`, + "BG": `^\d{4}$`, + "KH": `^\d{5}$`, + "CV": `^\d{4}$`, + "CL": `^\d{7}$`, + "CR": `^\d{4,5}|\d{3}-\d{4}$`, + "HR": `^\d{5}$`, + "CY": `^\d{4}$`, + "CZ": `^\d{3}[ ]?\d{2}$`, + "DO": `^\d{5}$`, + "EC": `^([A-Z]\d{4}[A-Z]|(?:[A-Z]{2})?\d{6})?$`, + "EG": `^\d{5}$`, + "EE": `^\d{5}$`, + "FO": `^\d{3}$`, + "GE": `^\d{4}$`, + "GR": `^\d{3}[ ]?\d{2}$`, + "GL": `^39\d{2}$`, + "GT": `^\d{5}$`, + "HT": `^\d{4}$`, + "HN": `^(?:\d{5})?$`, + "HU": `^\d{4}$`, + "IS": `^\d{3}$`, + "IN": `^\d{6}$`, + "ID": `^\d{5}$`, + "IL": `^\d{5}$`, + "JO": `^\d{5}$`, + "KZ": `^\d{6}$`, + "KE": `^\d{5}$`, + "KW": `^\d{5}$`, + "LA": `^\d{5}$`, + "LV": `^\d{4}$`, + "LB": `^(\d{4}([ ]?\d{4})?)?$`, + "LI": `^(948[5-9])|(949[0-7])$`, + "LT": `^\d{5}$`, + "LU": `^\d{4}$`, + "MK": `^\d{4}$`, + "MY": `^\d{5}$`, + "MV": `^\d{5}$`, + "MT": `^[A-Z]{3}[ ]?\d{2,4}$`, + "MU": `^(\d{3}[A-Z]{2}\d{3})?$`, + "MX": `^\d{5}$`, + "MD": `^\d{4}$`, + "MC": `^980\d{2}$`, + "MA": `^\d{5}$`, + "NP": `^\d{5}$`, + "NZ": `^\d{4}$`, + "NI": `^((\d{4}-)?\d{3}-\d{3}(-\d{1})?)?$`, + "NG": `^(\d{6})?$`, + "OM": `^(PC )?\d{3}$`, + "PK": `^\d{5}$`, + "PY": `^\d{4}$`, + "PH": `^\d{4}$`, + "PL": `^\d{2}-\d{3}$`, + "PR": `^00[679]\d{2}([ \-]\d{4})?$`, + "RO": `^\d{6}$`, + "RU": `^\d{6}$`, + "SM": `^4789\d$`, + "SA": `^\d{5}$`, + "SN": `^\d{5}$`, + "SK": `^\d{3}[ ]?\d{2}$`, + "SI": `^\d{4}$`, + "ZA": `^\d{4}$`, + "LK": `^\d{5}$`, + "TJ": `^\d{6}$`, + "TH": `^\d{5}$`, + "TN": `^\d{4}$`, + "TR": `^\d{5}$`, + "TM": `^\d{6}$`, + "UA": `^\d{5}$`, + "UY": `^\d{5}$`, + "UZ": `^\d{6}$`, + "VA": `^00120$`, + "VE": `^\d{4}$`, + "ZM": `^\d{5}$`, + "AS": `^96799$`, + "CC": `^6799$`, + "CK": `^\d{4}$`, + "RS": `^\d{6}$`, + "ME": `^8\d{4}$`, + "CS": `^\d{5}$`, + "YU": `^\d{5}$`, + "CX": `^6798$`, + "ET": `^\d{4}$`, + "FK": `^FIQQ 1ZZ$`, + "NF": `^2899$`, + "FM": `^(9694[1-4])([ \-]\d{4})?$`, + "GF": `^9[78]3\d{2}$`, + "GN": `^\d{3}$`, + "GP": `^9[78][01]\d{2}$`, + "GS": `^SIQQ 1ZZ$`, + "GU": `^969[123]\d([ 
\-]\d{4})?$`, + "GW": `^\d{4}$`, + "HM": `^\d{4}$`, + "IQ": `^\d{5}$`, + "KG": `^\d{6}$`, + "LR": `^\d{4}$`, + "LS": `^\d{3}$`, + "MG": `^\d{3}$`, + "MH": `^969[67]\d([ \-]\d{4})?$`, + "MN": `^\d{6}$`, + "MP": `^9695[012]([ \-]\d{4})?$`, + "MQ": `^9[78]2\d{2}$`, + "NC": `^988\d{2}$`, + "NE": `^\d{4}$`, + "VI": `^008(([0-4]\d)|(5[01]))([ \-]\d{4})?$`, + "VN": `^[0-9]{1,6}$`, + "PF": `^987\d{2}$`, + "PG": `^\d{3}$`, + "PM": `^9[78]5\d{2}$`, + "PN": `^PCRN 1ZZ$`, + "PW": `^96940$`, + "RE": `^9[78]4\d{2}$`, + "SH": `^(ASCN|STHL) 1ZZ$`, + "SJ": `^\d{4}$`, + "SO": `^\d{5}$`, + "SZ": `^[HLMS]\d{3}$`, + "TC": `^TKCA 1ZZ$`, + "WF": `^986\d{2}$`, + "XK": `^\d{5}$`, + "YT": `^976\d{2}$`, +} + +var ( + postcodeRegexInit sync.Once + postCodeRegexDict = map[string]*regexp.Regexp{} +) + +func initPostcodes() { + for countryCode, pattern := range postCodePatternDict { + postCodeRegexDict[countryCode] = regexp.MustCompile(pattern) + } +} diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go new file mode 100644 index 0000000000..0b3615f5e4 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/regexes.go @@ -0,0 +1,167 @@ +package validator + +import ( + "regexp" + "sync" +) + +const ( + alphaRegexString = "^[a-zA-Z]+$" + alphaSpaceRegexString = "^[a-zA-Z ]+$" + alphaNumericRegexString = "^[a-zA-Z0-9]+$" + alphaUnicodeRegexString = "^[\\p{L}]+$" + alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$" + numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$" + numberRegexString = "^[0-9]+$" + hexadecimalRegexString = "^(0[xX])?[0-9a-fA-F]+$" + hexColorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{4}|[0-9a-fA-F]{6}|[0-9a-fA-F]{8})$" + rgbRegexString = 
"^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$" + rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" + hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$" + hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" + emailRegexString = 
"^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + e164RegexString = "^\\+[1-9]?[0-9]{7,14}$" + base32RegexString = "^(?:[A-Z2-7]{8})*(?:[A-Z2-7]{2}={6}|[A-Z2-7]{4}={4}|[A-Z2-7]{5}={3}|[A-Z2-7]{7}=|[A-Z2-7]{8})$" + base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$" + base64RawURLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2,4})$" + iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$" + iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$" + iSSNRegexString = "^(?:[0-9]{4}-[0-9]{3}[0-9X])$" + uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + 
uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + uUID3RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-3[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + uUID4RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$" + uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$" + uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + uLIDRegexString = "^(?i)[A-HJKMNP-TV-Z0-9]{26}$" + md4RegexString = "^[0-9a-f]{32}$" + md5RegexString = "^[0-9a-f]{32}$" + sha256RegexString = "^[0-9a-f]{64}$" + sha384RegexString = "^[0-9a-f]{96}$" + sha512RegexString = "^[0-9a-f]{128}$" + ripemd128RegexString = "^[0-9a-f]{32}$" + ripemd160RegexString = "^[0-9a-f]{40}$" + tiger128RegexString = "^[0-9a-f]{32}$" + tiger160RegexString = "^[0-9a-f]{40}$" + tiger192RegexString = "^[0-9a-f]{48}$" + aSCIIRegexString = "^[\x00-\x7F]*$" + printableASCIIRegexString = "^[\x20-\x7E]*$" + multibyteRegexString = "[^\x00-\x7F]" + dataURIRegexString = `^data:((?:\w+\/(?:([^;]|;[^;]).)+)?)` + latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$` + hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952 + hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123 + fqdnRegexStringRFC1123 = 
`^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.') + btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address + btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 + btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 + ethAddressRegexString = `^0x[0-9a-fA-F]{40}$` + ethAddressUpperRegexString = `^0x[0-9A-F]{40}$` + ethAddressLowerRegexString = `^0x[0-9a-f]{40}$` + uRLEncodedRegexString = `^(?:[^%]|%[0-9A-Fa-f]{2})*$` + hTMLEncodedRegexString = `&#[x]?([0-9a-fA-F]{2})|(>)|(<)|(")|(&)+[;]?` + hTMLRegexString = `<[/]?([a-zA-Z]+).*?>` + jWTRegexString = "^[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]*$" + splitParamsRegexString = `'[^']*'|\S+` + bicRegexString = `^[A-Za-z]{6}[A-Za-z0-9]{2}([A-Za-z0-9]{3})?$` + semverRegexString = `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$` // numbered capture groups https://semver.org/ + dnsRegexStringRFC1035Label = "^[a-z]([-a-z0-9]*[a-z0-9])?$" + cveRegexString = `^CVE-(1999|2\d{3})-(0[^0]\d{2}|0\d[^0]\d{1}|0\d{2}[^0]|[1-9]{1}\d{3,})$` // CVE Format Id https://cve.mitre.org/cve/identifiers/syntaxchange.html + mongodbIdRegexString = "^[a-f\\d]{24}$" + mongodbConnStringRegexString = "^mongodb(\\+srv)?:\\/\\/(([a-zA-Z\\d]+):([a-zA-Z\\d$:\\/?#\\[\\]@]+)@)?(([a-z\\d.-]+)(:[\\d]+)?)((,(([a-z\\d.-]+)(:(\\d+))?))*)?(\\/[a-zA-Z-_]{1,64})?(\\?(([a-zA-Z]+)=([a-zA-Z\\d]+))(&(([a-zA-Z\\d]+)=([a-zA-Z\\d]+))?)*)?$" + cronRegexString = `(@(annually|yearly|monthly|weekly|daily|hourly|reboot))|(@every (\d+(ns|us|µs|ms|s|m|h))+)|((((\d+,)+\d+|((\*|\d+)(\/|-)\d+)|\d+|\*) ?){5,7})` + 
spicedbIDRegexString = `^(([a-zA-Z0-9/_|\-=+]{1,})|\*)$` + spicedbPermissionRegexString = "^([a-z][a-z0-9_]{1,62}[a-z0-9])?$" + spicedbTypeRegexString = "^([a-z][a-z0-9_]{1,61}[a-z0-9]/)?[a-z][a-z0-9_]{1,62}[a-z0-9]$" + einRegexString = "^(\\d{2}-\\d{7})$" +) + +func lazyRegexCompile(str string) func() *regexp.Regexp { + var regex *regexp.Regexp + var once sync.Once + return func() *regexp.Regexp { + once.Do(func() { + regex = regexp.MustCompile(str) + }) + return regex + } +} + +var ( + alphaRegex = lazyRegexCompile(alphaRegexString) + alphaSpaceRegex = lazyRegexCompile(alphaSpaceRegexString) + alphaNumericRegex = lazyRegexCompile(alphaNumericRegexString) + alphaUnicodeRegex = lazyRegexCompile(alphaUnicodeRegexString) + alphaUnicodeNumericRegex = lazyRegexCompile(alphaUnicodeNumericRegexString) + numericRegex = lazyRegexCompile(numericRegexString) + numberRegex = lazyRegexCompile(numberRegexString) + hexadecimalRegex = lazyRegexCompile(hexadecimalRegexString) + hexColorRegex = lazyRegexCompile(hexColorRegexString) + rgbRegex = lazyRegexCompile(rgbRegexString) + rgbaRegex = lazyRegexCompile(rgbaRegexString) + hslRegex = lazyRegexCompile(hslRegexString) + hslaRegex = lazyRegexCompile(hslaRegexString) + e164Regex = lazyRegexCompile(e164RegexString) + emailRegex = lazyRegexCompile(emailRegexString) + base32Regex = lazyRegexCompile(base32RegexString) + base64Regex = lazyRegexCompile(base64RegexString) + base64URLRegex = lazyRegexCompile(base64URLRegexString) + base64RawURLRegex = lazyRegexCompile(base64RawURLRegexString) + iSBN10Regex = lazyRegexCompile(iSBN10RegexString) + iSBN13Regex = lazyRegexCompile(iSBN13RegexString) + iSSNRegex = lazyRegexCompile(iSSNRegexString) + uUID3Regex = lazyRegexCompile(uUID3RegexString) + uUID4Regex = lazyRegexCompile(uUID4RegexString) + uUID5Regex = lazyRegexCompile(uUID5RegexString) + uUIDRegex = lazyRegexCompile(uUIDRegexString) + uUID3RFC4122Regex = lazyRegexCompile(uUID3RFC4122RegexString) + uUID4RFC4122Regex = 
lazyRegexCompile(uUID4RFC4122RegexString) + uUID5RFC4122Regex = lazyRegexCompile(uUID5RFC4122RegexString) + uUIDRFC4122Regex = lazyRegexCompile(uUIDRFC4122RegexString) + uLIDRegex = lazyRegexCompile(uLIDRegexString) + md4Regex = lazyRegexCompile(md4RegexString) + md5Regex = lazyRegexCompile(md5RegexString) + sha256Regex = lazyRegexCompile(sha256RegexString) + sha384Regex = lazyRegexCompile(sha384RegexString) + sha512Regex = lazyRegexCompile(sha512RegexString) + ripemd128Regex = lazyRegexCompile(ripemd128RegexString) + ripemd160Regex = lazyRegexCompile(ripemd160RegexString) + tiger128Regex = lazyRegexCompile(tiger128RegexString) + tiger160Regex = lazyRegexCompile(tiger160RegexString) + tiger192Regex = lazyRegexCompile(tiger192RegexString) + aSCIIRegex = lazyRegexCompile(aSCIIRegexString) + printableASCIIRegex = lazyRegexCompile(printableASCIIRegexString) + multibyteRegex = lazyRegexCompile(multibyteRegexString) + dataURIRegex = lazyRegexCompile(dataURIRegexString) + latitudeRegex = lazyRegexCompile(latitudeRegexString) + longitudeRegex = lazyRegexCompile(longitudeRegexString) + sSNRegex = lazyRegexCompile(sSNRegexString) + hostnameRegexRFC952 = lazyRegexCompile(hostnameRegexStringRFC952) + hostnameRegexRFC1123 = lazyRegexCompile(hostnameRegexStringRFC1123) + fqdnRegexRFC1123 = lazyRegexCompile(fqdnRegexStringRFC1123) + btcAddressRegex = lazyRegexCompile(btcAddressRegexString) + btcUpperAddressRegexBech32 = lazyRegexCompile(btcAddressUpperRegexStringBech32) + btcLowerAddressRegexBech32 = lazyRegexCompile(btcAddressLowerRegexStringBech32) + ethAddressRegex = lazyRegexCompile(ethAddressRegexString) + uRLEncodedRegex = lazyRegexCompile(uRLEncodedRegexString) + hTMLEncodedRegex = lazyRegexCompile(hTMLEncodedRegexString) + hTMLRegex = lazyRegexCompile(hTMLRegexString) + jWTRegex = lazyRegexCompile(jWTRegexString) + splitParamsRegex = lazyRegexCompile(splitParamsRegexString) + bicRegex = lazyRegexCompile(bicRegexString) + semverRegex = lazyRegexCompile(semverRegexString) + 
dnsRegexRFC1035Label = lazyRegexCompile(dnsRegexStringRFC1035Label) + cveRegex = lazyRegexCompile(cveRegexString) + mongodbIdRegex = lazyRegexCompile(mongodbIdRegexString) + mongodbConnectionRegex = lazyRegexCompile(mongodbConnStringRegexString) + cronRegex = lazyRegexCompile(cronRegexString) + spicedbIDRegex = lazyRegexCompile(spicedbIDRegexString) + spicedbPermissionRegex = lazyRegexCompile(spicedbPermissionRegexString) + spicedbTypeRegex = lazyRegexCompile(spicedbTypeRegexString) + einRegex = lazyRegexCompile(einRegexString) +) diff --git a/vendor/github.com/go-playground/validator/v10/struct_level.go b/vendor/github.com/go-playground/validator/v10/struct_level.go new file mode 100644 index 0000000000..129b287258 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/struct_level.go @@ -0,0 +1,171 @@ +package validator + +import ( + "context" + "reflect" +) + +// StructLevelFunc accepts all values needed for struct level validation +type StructLevelFunc func(sl StructLevel) + +// StructLevelFuncCtx accepts all values needed for struct level validation +// but also allows passing of contextual validation information via context.Context. +type StructLevelFuncCtx func(ctx context.Context, sl StructLevel) + +// wrapStructLevelFunc wraps normal StructLevelFunc makes it compatible with StructLevelFuncCtx +func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx { + return func(ctx context.Context, sl StructLevel) { + fn(sl) + } +} + +// StructLevel contains all the information and helper functions +// to validate a struct +type StructLevel interface { + + // Validator returns the main validation object, in case one wants to call validations internally. + // this is so you don't have to use anonymous functions to get access to the validate + // instance. 
+ Validator() *Validate + + // Top returns the top level struct, if any + Top() reflect.Value + + // Parent returns the current fields parent struct, if any + Parent() reflect.Value + + // Current returns the current struct. + Current() reflect.Value + + // ExtractType gets the actual underlying type of field value. + // It will dive into pointers, customTypes and return you the + // underlying value and its kind. + ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool) + + // ReportError reports an error just by passing the field and tag information + // + // NOTES: + // + // fieldName and structFieldName get appended to the existing + // namespace that validator is on. e.g. pass 'FirstName' or + // 'Names[0]' depending on the nesting + // + // tag can be an existing validation tag or just something you make up + // and process on the flip side it's up to you. + ReportError(field interface{}, fieldName, structFieldName string, tag, param string) + + // ReportValidationErrors reports an error just by passing ValidationErrors + // + // NOTES: + // + // relativeNamespace and relativeActualNamespace get appended to the + // existing namespace that validator is on. + // e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending + // on the nesting. most of the time they will be blank, unless you validate + // at a level lower the current field depth + ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors) +} + +var _ StructLevel = new(validate) + +// Top returns the top level struct +// +// NOTE: this can be the same as the current struct being validated +// if not is a nested struct. +// +// this is only called when within Struct and Field Level validation and +// should not be relied upon for an accurate value otherwise. 
+func (v *validate) Top() reflect.Value { + return v.top +} + +// Parent returns the current structs parent +// +// NOTE: this can be the same as the current struct being validated +// if not is a nested struct. +// +// this is only called when within Struct and Field Level validation and +// should not be relied upon for an accurate value otherwise. +func (v *validate) Parent() reflect.Value { + return v.slflParent +} + +// Current returns the current struct. +func (v *validate) Current() reflect.Value { + return v.slCurrent +} + +// Validator returns the main validation object, in case one want to call validations internally. +func (v *validate) Validator() *Validate { + return v.v +} + +// ExtractType gets the actual underlying type of field value. +func (v *validate) ExtractType(field reflect.Value) (reflect.Value, reflect.Kind, bool) { + return v.extractTypeInternal(field, false) +} + +// ReportError reports an error just by passing the field and tag information +func (v *validate) ReportError(field interface{}, fieldName, structFieldName, tag, param string) { + fv, kind, _ := v.extractTypeInternal(reflect.ValueOf(field), false) + + if len(structFieldName) == 0 { + structFieldName = fieldName + } + + v.str1 = string(append(v.ns, fieldName...)) + + if v.v.hasTagNameFunc || fieldName != structFieldName { + v.str2 = string(append(v.actualNs, structFieldName...)) + } else { + v.str2 = v.str1 + } + + if kind == reflect.Invalid { + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tag, + actualTag: tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(fieldName)), + structfieldLen: uint8(len(structFieldName)), + param: param, + kind: kind, + }, + ) + return + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tag, + actualTag: tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(fieldName)), + structfieldLen: uint8(len(structFieldName)), + value: getValue(fv), + param: param, + kind: kind, + typ: fv.Type(), + }, + ) +} + +// 
ReportValidationErrors reports ValidationErrors obtained from running validations within the Struct Level validation. +// +// NOTE: this function prepends the current namespace to the relative ones. +func (v *validate) ReportValidationErrors(relativeNamespace, relativeStructNamespace string, errs ValidationErrors) { + var err *fieldError + + for i := 0; i < len(errs); i++ { + err = errs[i].(*fieldError) + err.ns = string(append(append(v.ns, relativeNamespace...), err.ns...)) + err.structNs = string(append(append(v.actualNs, relativeStructNamespace...), err.structNs...)) + + v.errs = append(v.errs, err) + } +} diff --git a/vendor/github.com/go-playground/validator/v10/translations.go b/vendor/github.com/go-playground/validator/v10/translations.go new file mode 100644 index 0000000000..4d9d75c13a --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/translations.go @@ -0,0 +1,11 @@ +package validator + +import ut "github.com/go-playground/universal-translator" + +// TranslationFunc is the function type used to register or override +// custom translations +type TranslationFunc func(ut ut.Translator, fe FieldError) string + +// RegisterTranslationsFunc allows for registering of translations +// for a 'ut.Translator' for use within the 'TranslationFunc' +type RegisterTranslationsFunc func(ut ut.Translator) error diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go new file mode 100644 index 0000000000..b1fd8cc11a --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/util.go @@ -0,0 +1,305 @@ +package validator + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +// extractTypeInternal gets the actual underlying type of field value. +// It will dive into pointers, customTypes and return you the +// underlying value and it's kind. 
+func (v *validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) { +BEGIN: + switch current.Kind() { + case reflect.Ptr: + + nullable = true + + if current.IsNil() { + return current, reflect.Ptr, nullable + } + + current = current.Elem() + goto BEGIN + + case reflect.Interface: + + nullable = true + + if current.IsNil() { + return current, reflect.Interface, nullable + } + + current = current.Elem() + goto BEGIN + + case reflect.Invalid: + return current, reflect.Invalid, nullable + + default: + + if v.v.hasCustomFuncs { + if fn, ok := v.v.customFuncs[current.Type()]; ok { + current = reflect.ValueOf(fn(current)) + goto BEGIN + } + } + + return current, current.Kind(), nullable + } +} + +// getStructFieldOKInternal traverses a struct to retrieve a specific field denoted by the provided namespace and +// returns the field, field kind and whether is was successful in retrieving the field at all. +// +// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field +// could not be retrieved because it didn't exist. 
+func (v *validate) getStructFieldOKInternal(val reflect.Value, namespace string) (current reflect.Value, kind reflect.Kind, nullable bool, found bool) { +BEGIN: + current, kind, nullable = v.ExtractType(val) + if kind == reflect.Invalid { + return + } + + if namespace == "" { + found = true + return + } + + switch kind { + case reflect.Ptr, reflect.Interface: + return + + case reflect.Struct: + + typ := current.Type() + fld := namespace + var ns string + + if !typ.ConvertibleTo(timeType) { + idx := strings.Index(namespace, namespaceSeparator) + + if idx != -1 { + fld = namespace[:idx] + ns = namespace[idx+1:] + } else { + ns = "" + } + + bracketIdx := strings.Index(fld, leftBracket) + if bracketIdx != -1 { + fld = fld[:bracketIdx] + + ns = namespace[bracketIdx:] + } + + val = current.FieldByName(fld) + namespace = ns + goto BEGIN + } + + case reflect.Array, reflect.Slice: + idx := strings.Index(namespace, leftBracket) + idx2 := strings.Index(namespace, rightBracket) + + arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2]) + + if arrIdx >= current.Len() { + return + } + + startIdx := idx2 + 1 + + if startIdx < len(namespace) { + if namespace[startIdx:startIdx+1] == namespaceSeparator { + startIdx++ + } + } + + val = current.Index(arrIdx) + namespace = namespace[startIdx:] + goto BEGIN + + case reflect.Map: + idx := strings.Index(namespace, leftBracket) + 1 + idx2 := strings.Index(namespace, rightBracket) + + endIdx := idx2 + + if endIdx+1 < len(namespace) { + if namespace[endIdx+1:endIdx+2] == namespaceSeparator { + endIdx++ + } + } + + key := namespace[idx:idx2] + + switch current.Type().Key().Kind() { + case reflect.Int: + i, _ := strconv.Atoi(key) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Int8: + i, _ := strconv.ParseInt(key, 10, 8) + val = current.MapIndex(reflect.ValueOf(int8(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int16: + i, _ := strconv.ParseInt(key, 10, 16) + val = 
current.MapIndex(reflect.ValueOf(int16(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int32: + i, _ := strconv.ParseInt(key, 10, 32) + val = current.MapIndex(reflect.ValueOf(int32(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int64: + i, _ := strconv.ParseInt(key, 10, 64) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Uint: + i, _ := strconv.ParseUint(key, 10, 0) + val = current.MapIndex(reflect.ValueOf(uint(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint8: + i, _ := strconv.ParseUint(key, 10, 8) + val = current.MapIndex(reflect.ValueOf(uint8(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint16: + i, _ := strconv.ParseUint(key, 10, 16) + val = current.MapIndex(reflect.ValueOf(uint16(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint32: + i, _ := strconv.ParseUint(key, 10, 32) + val = current.MapIndex(reflect.ValueOf(uint32(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint64: + i, _ := strconv.ParseUint(key, 10, 64) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Float32: + f, _ := strconv.ParseFloat(key, 32) + val = current.MapIndex(reflect.ValueOf(float32(f))) + namespace = namespace[endIdx+1:] + + case reflect.Float64: + f, _ := strconv.ParseFloat(key, 64) + val = current.MapIndex(reflect.ValueOf(f)) + namespace = namespace[endIdx+1:] + + case reflect.Bool: + b, _ := strconv.ParseBool(key) + val = current.MapIndex(reflect.ValueOf(b)) + namespace = namespace[endIdx+1:] + + // reflect.Type = string + default: + val = current.MapIndex(reflect.ValueOf(key)) + namespace = namespace[endIdx+1:] + } + + goto BEGIN + } + + // if got here there was more namespace, cannot go any deeper + panic("Invalid field namespace") +} + +// asInt returns the parameter as an int64 +// or panics if it can't convert +func asInt(param string) int64 { + i, err := strconv.ParseInt(param, 0, 64) + panicIf(err) + + return i +} + 
+// asIntFromTimeDuration parses param as time.Duration and returns it as int64 +// or panics on error. +func asIntFromTimeDuration(param string) int64 { + d, err := time.ParseDuration(param) + if err != nil { + // attempt parsing as an integer assuming nanosecond precision + return asInt(param) + } + return int64(d) +} + +// asIntFromType calls the proper function to parse param as int64, +// given a field's Type t. +func asIntFromType(t reflect.Type, param string) int64 { + switch t { + case timeDurationType: + return asIntFromTimeDuration(param) + default: + return asInt(param) + } +} + +// asUint returns the parameter as a uint64 +// or panics if it can't convert +func asUint(param string) uint64 { + i, err := strconv.ParseUint(param, 0, 64) + panicIf(err) + + return i +} + +// asFloat64 returns the parameter as a float64 +// or panics if it can't convert +func asFloat64(param string) float64 { + i, err := strconv.ParseFloat(param, 64) + panicIf(err) + return i +} + +// asFloat32 returns the parameter as a float32 +// or panics if it can't convert +func asFloat32(param string) float64 { + i, err := strconv.ParseFloat(param, 32) + panicIf(err) + return i +} + +// asBool returns the parameter as a bool +// or panics if it can't convert +func asBool(param string) bool { + i, err := strconv.ParseBool(param) + panicIf(err) + + return i +} + +func panicIf(err error) { + if err != nil { + panic(err.Error()) + } +} + +// Checks if field value matches regex. If fl.Field can be cast to Stringer, it uses the Stringer interfaces +// String() return value. Otherwise, it uses fl.Field's String() value. 
+func fieldMatchesRegexByStringerValOrString(regexFn func() *regexp.Regexp, fl FieldLevel) bool { + regex := regexFn() + switch fl.Field().Kind() { + case reflect.String: + return regex.MatchString(fl.Field().String()) + default: + if stringer, ok := getValue(fl.Field()).(fmt.Stringer); ok { + return regex.MatchString(stringer.String()) + } else { + return regex.MatchString(fl.Field().String()) + } + } +} diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go new file mode 100644 index 0000000000..995b0e19ad --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/validator.go @@ -0,0 +1,523 @@ +package validator + +import ( + "context" + "fmt" + "reflect" + "strconv" + "unsafe" +) + +// per validate construct +type validate struct { + v *Validate + top reflect.Value + ns []byte + actualNs []byte + errs ValidationErrors + includeExclude map[string]struct{} // reset only if StructPartial or StructExcept are called, no need otherwise + ffn FilterFunc + slflParent reflect.Value // StructLevel & FieldLevel + slCurrent reflect.Value // StructLevel & FieldLevel + flField reflect.Value // StructLevel & FieldLevel + cf *cField // StructLevel & FieldLevel + ct *cTag // StructLevel & FieldLevel + misc []byte // misc reusable + str1 string // misc reusable + str2 string // misc reusable + fldIsPointer bool // StructLevel & FieldLevel + isPartial bool + hasExcludes bool +} + +// parent and current will be the same the first run of validateStruct +func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) { + cs, ok := v.v.structCache.Get(typ) + if !ok { + cs = v.v.extractStructCache(current, typ.Name()) + } + + if len(ns) == 0 && len(cs.name) != 0 { + ns = append(ns, cs.name...) + ns = append(ns, '.') + + structNs = append(structNs, cs.name...) 
+ structNs = append(structNs, '.') + } + + // ct is nil on top level struct, and structs as fields that have no tag info + // so if nil or if not nil and the structonly tag isn't present + if ct == nil || ct.typeof != typeStructOnly { + var f *cField + + for i := 0; i < len(cs.fields); i++ { + f = cs.fields[i] + + if v.isPartial { + if v.ffn != nil { + // used with StructFiltered + if v.ffn(append(structNs, f.name...)) { + continue + } + } else { + // used with StructPartial & StructExcept + _, ok = v.includeExclude[string(append(structNs, f.name...))] + + if (ok && v.hasExcludes) || (!ok && !v.hasExcludes) { + continue + } + } + } + + v.traverseField(ctx, current, current.Field(f.idx), ns, structNs, f, f.cTags) + } + } + + // check if any struct level validations, after all field validations already checked. + // first iteration will have no info about nostructlevel tag, and is checked prior to + // calling the next iteration of validateStruct called from traverseField. + if cs.fn != nil { + v.slflParent = parent + v.slCurrent = current + v.ns = ns + v.actualNs = structNs + + cs.fn(ctx, v) + } +} + +// traverseField validates any field, be it a struct or single field, ensures it's validity and passes it along to be validated via it's tag options +func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) { + var typ reflect.Type + var kind reflect.Kind + + current, kind, v.fldIsPointer = v.extractTypeInternal(current, false) + + var isNestedStruct bool + + switch kind { + case reflect.Ptr, reflect.Interface, reflect.Invalid: + + if ct == nil { + return + } + + if ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault { + return + } + + if ct.typeof == typeOmitNil && (kind != reflect.Invalid && current.IsNil()) { + return + } + + if ct.typeof == typeOmitZero { + return + } + + if ct.hasTag { + if kind == reflect.Invalid { + v.str1 = string(append(ns, cf.altName...)) + if 
v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + param: ct.param, + kind: kind, + }, + ) + return + } + + v.str1 = string(append(ns, cf.altName...)) + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + if !ct.runValidationWhenNil { + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: getValue(current), + param: ct.param, + kind: kind, + typ: current.Type(), + }, + ) + return + } + } + + if kind == reflect.Invalid { + return + } + + case reflect.Struct: + isNestedStruct = !current.Type().ConvertibleTo(timeType) + // For backward compatibility before struct level validation tags were supported + // as there were a number of projects relying on `required` not failing on non-pointer + // structs. Since it's basically nonsensical to use `required` with a non-pointer struct + // are explicitly skipping the required validation for it. This WILL be removed in the + // next major version. + if isNestedStruct && !v.v.requiredStructEnabled && ct != nil && ct.tag == requiredTag { + ct = ct.next + } + } + + typ = current.Type() + +OUTER: + for { + if ct == nil || !ct.hasTag || (isNestedStruct && len(cf.name) == 0) { + // isNestedStruct check here + if isNestedStruct { + // if len == 0 then validating using 'Var' or 'VarWithValue' + // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm... 
+ // VarWithField - this allows for validating against each field within the struct against a specific value + // pretty handy in certain situations + if len(cf.name) > 0 { + ns = append(append(ns, cf.altName...), '.') + structNs = append(append(structNs, cf.name...), '.') + } + + v.validateStruct(ctx, parent, current, typ, ns, structNs, ct) + } + return + } + + switch ct.typeof { + case typeNoStructLevel: + return + + case typeStructOnly: + if isNestedStruct { + // if len == 0 then validating using 'Var' or 'VarWithValue' + // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm... + // VarWithField - this allows for validating against each field within the struct against a specific value + // pretty handy in certain situations + if len(cf.name) > 0 { + ns = append(append(ns, cf.altName...), '.') + structNs = append(append(structNs, cf.name...), '.') + } + + v.validateStruct(ctx, parent, current, typ, ns, structNs, ct) + } + return + + case typeOmitEmpty: + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !hasValue(v) { + return + } + + ct = ct.next + continue + + case typeOmitZero: + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !hasNotZeroValue(v) { + return + } + + ct = ct.next + continue + + case typeOmitNil: + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + switch field := v.Field(); field.Kind() { + case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: + if field.IsNil() { + return + } + default: + if v.fldIsPointer && getValue(field) == nil { + return + } + } + + ct = ct.next + continue + + case typeEndKeys: + return + + case typeDive: + + ct = ct.next + + // traverse slice or map here + // or panic ;) + switch kind { + case reflect.Slice, reflect.Array: + + var i64 int64 + reusableCF := &cField{} + + for i := 0; i < current.Len(); i++ { + i64 = int64(i) + + v.misc = 
append(v.misc[0:0], cf.name...) + v.misc = append(v.misc, '[') + v.misc = strconv.AppendInt(v.misc, i64, 10) + v.misc = append(v.misc, ']') + + reusableCF.name = string(v.misc) + + if cf.namesEqual { + reusableCF.altName = reusableCF.name + } else { + v.misc = append(v.misc[0:0], cf.altName...) + v.misc = append(v.misc, '[') + v.misc = strconv.AppendInt(v.misc, i64, 10) + v.misc = append(v.misc, ']') + + reusableCF.altName = string(v.misc) + } + v.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct) + } + + case reflect.Map: + + var pv string + reusableCF := &cField{} + + for _, key := range current.MapKeys() { + pv = fmt.Sprintf("%v", key) + + v.misc = append(v.misc[0:0], cf.name...) + v.misc = append(v.misc, '[') + v.misc = append(v.misc, pv...) + v.misc = append(v.misc, ']') + + reusableCF.name = string(v.misc) + + if cf.namesEqual { + reusableCF.altName = reusableCF.name + } else { + v.misc = append(v.misc[0:0], cf.altName...) + v.misc = append(v.misc, '[') + v.misc = append(v.misc, pv...) 
+ v.misc = append(v.misc, ']') + + reusableCF.altName = string(v.misc) + } + + if ct != nil && ct.typeof == typeKeys && ct.keys != nil { + v.traverseField(ctx, parent, key, ns, structNs, reusableCF, ct.keys) + // can be nil when just keys being validated + if ct.next != nil { + v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next) + } else { + // Struct fallback when map values are structs + val := current.MapIndex(key) + switch val.Kind() { + case reflect.Ptr: + if val.Elem().Kind() == reflect.Struct { + // Dive into the struct so its own tags run + v.traverseField(ctx, parent, val, ns, structNs, reusableCF, nil) + } + case reflect.Struct: + v.traverseField(ctx, parent, val, ns, structNs, reusableCF, nil) + } + } + } else { + v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct) + } + } + + default: + // throw error, if not a slice or map then should not have gotten here + // bad dive tag + panic("dive error! can't dive on a non slice or map") + } + + return + + case typeOr: + + v.misc = v.misc[0:0] + + for { + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if ct.fn(ctx, v) { + if ct.isBlockEnd { + ct = ct.next + continue OUTER + } + + // drain rest of the 'or' values, then continue or leave + for { + ct = ct.next + + if ct == nil { + continue OUTER + } + + if ct.typeof != typeOr { + continue OUTER + } + + if ct.isBlockEnd { + ct = ct.next + continue OUTER + } + } + } + + v.misc = append(v.misc, '|') + v.misc = append(v.misc, ct.tag...) + + if ct.hasParam { + v.misc = append(v.misc, '=') + v.misc = append(v.misc, ct.param...) 
+ } + + if ct.isBlockEnd || ct.next == nil { + // if we get here, no valid 'or' value and no more tags + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + if ct.hasAlias { + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.actualAliasTag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: getValue(current), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + } else { + tVal := string(v.misc)[1:] + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tVal, + actualTag: tVal, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: getValue(current), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + } + + return + } + + ct = ct.next + } + + default: + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !ct.fn(ctx, v) { + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: getValue(current), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + + return + } + ct = ct.next + } + } +} + +func getValue(val reflect.Value) interface{} { + if val.CanInterface() { + return val.Interface() + } + + if val.CanAddr() { + return reflect.NewAt(val.Type(), unsafe.Pointer(val.UnsafeAddr())).Elem().Interface() + } + + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return val.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return 
val.Uint() + case reflect.Complex64, reflect.Complex128: + return val.Complex() + case reflect.Float32, reflect.Float64: + return val.Float() + default: + return val.String() + } +} diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go new file mode 100644 index 0000000000..5ba64e5ba5 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go @@ -0,0 +1,757 @@ +package validator + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + "sync" + "time" + + ut "github.com/go-playground/universal-translator" +) + +const ( + defaultTagName = "validate" + utf8HexComma = "0x2C" + utf8Pipe = "0x7C" + tagSeparator = "," + orSeparator = "|" + tagKeySeparator = "=" + structOnlyTag = "structonly" + noStructLevelTag = "nostructlevel" + omitzero = "omitzero" + omitempty = "omitempty" + omitnil = "omitnil" + isdefault = "isdefault" + requiredWithoutAllTag = "required_without_all" + requiredWithoutTag = "required_without" + requiredWithTag = "required_with" + requiredWithAllTag = "required_with_all" + requiredIfTag = "required_if" + requiredUnlessTag = "required_unless" + skipUnlessTag = "skip_unless" + excludedWithoutAllTag = "excluded_without_all" + excludedWithoutTag = "excluded_without" + excludedWithTag = "excluded_with" + excludedWithAllTag = "excluded_with_all" + excludedIfTag = "excluded_if" + excludedUnlessTag = "excluded_unless" + skipValidationTag = "-" + diveTag = "dive" + keysTag = "keys" + endKeysTag = "endkeys" + requiredTag = "required" + namespaceSeparator = "." 
+ leftBracket = "[" + rightBracket = "]" + restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}" + restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" + restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" +) + +var ( + timeDurationType = reflect.TypeOf(time.Duration(0)) + timeType = reflect.TypeOf(time.Time{}) + + byteSliceType = reflect.TypeOf([]byte{}) + + defaultCField = &cField{namesEqual: true} +) + +// FilterFunc is the type used to filter fields using +// StructFiltered(...) function. +// returning true results in the field being filtered/skipped from +// validation +type FilterFunc func(ns []byte) bool + +// CustomTypeFunc allows for overriding or adding custom field type handler functions +// field = field value of the type to return a value to be validated +// example Valuer from sql drive see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29 +type CustomTypeFunc func(field reflect.Value) interface{} + +// TagNameFunc allows for adding of a custom tag name parser +type TagNameFunc func(field reflect.StructField) string + +type internalValidationFuncWrapper struct { + fn FuncCtx + runValidationOnNil bool +} + +// Validate contains the validator settings and cache +type Validate struct { + tagName string + pool *sync.Pool + tagNameFunc TagNameFunc + structLevelFuncs map[reflect.Type]StructLevelFuncCtx + customFuncs map[reflect.Type]CustomTypeFunc + aliases map[string]string + validations map[string]internalValidationFuncWrapper + transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc + rules map[reflect.Type]map[string]string + tagCache *tagCache + structCache *structCache + hasCustomFuncs bool + hasTagNameFunc bool + requiredStructEnabled bool + privateFieldValidation bool +} + +// New returns a new instance of 'validate' with sane defaults. 
+// Validate is designed to be thread-safe and used as a singleton instance. +// It caches information about your struct and validations, +// in essence only parsing your validation tags once per struct type. +// Using multiple instances neglects the benefit of caching. +func New(options ...Option) *Validate { + tc := new(tagCache) + tc.m.Store(make(map[string]*cTag)) + + sc := new(structCache) + sc.m.Store(make(map[reflect.Type]*cStruct)) + + v := &Validate{ + tagName: defaultTagName, + aliases: make(map[string]string, len(bakedInAliases)), + validations: make(map[string]internalValidationFuncWrapper, len(bakedInValidators)), + tagCache: tc, + structCache: sc, + } + + // must copy alias validators for separate validations to be used in each validator instance + for k, val := range bakedInAliases { + v.RegisterAlias(k, val) + } + + // must copy validators for separate validations to be used in each instance + for k, val := range bakedInValidators { + switch k { + // these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour + case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag, + excludedIfTag, excludedUnlessTag, excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag, + skipUnlessTag: + _ = v.registerValidation(k, wrapFunc(val), true, true) + default: + // no need to error check here, baked in will always be valid + _ = v.registerValidation(k, wrapFunc(val), true, false) + } + } + + v.pool = &sync.Pool{ + New: func() interface{} { + return &validate{ + v: v, + ns: make([]byte, 0, 64), + actualNs: make([]byte, 0, 64), + misc: make([]byte, 32), + } + }, + } + + for _, o := range options { + o(v) + } + return v +} + +// SetTagName allows for changing of the default tag name of 'validate' +func (v *Validate) SetTagName(name string) { + v.tagName = name +} + +// ValidateMapCtx validates a map using a map of validation rules 
and allows passing of contextual +// validation information via context.Context. +func (v Validate) ValidateMapCtx(ctx context.Context, data map[string]interface{}, rules map[string]interface{}) map[string]interface{} { + errs := make(map[string]interface{}) + for field, rule := range rules { + if ruleObj, ok := rule.(map[string]interface{}); ok { + if dataObj, ok := data[field].(map[string]interface{}); ok { + err := v.ValidateMapCtx(ctx, dataObj, ruleObj) + if len(err) > 0 { + errs[field] = err + } + } else if dataObjs, ok := data[field].([]map[string]interface{}); ok { + for _, obj := range dataObjs { + err := v.ValidateMapCtx(ctx, obj, ruleObj) + if len(err) > 0 { + errs[field] = err + } + } + } else { + errs[field] = errors.New("The field: '" + field + "' is not a map to dive") + } + } else if ruleStr, ok := rule.(string); ok { + err := v.VarWithKeyCtx(ctx, field, data[field], ruleStr) + if err != nil { + errs[field] = err + } + } + } + return errs +} + +// ValidateMap validates map data from a map of tags +func (v *Validate) ValidateMap(data map[string]interface{}, rules map[string]interface{}) map[string]interface{} { + return v.ValidateMapCtx(context.Background(), data, rules) +} + +// RegisterTagNameFunc registers a function to get alternate names for StructFields. +// +// eg. to use the names which have been specified for JSON representations of structs, rather than normal Go field names: +// +// validate.RegisterTagNameFunc(func(fld reflect.StructField) string { +// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0] +// // skip if tag key says it should be ignored +// if name == "-" { +// return "" +// } +// return name +// }) +func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) { + v.tagNameFunc = fn + v.hasTagNameFunc = true +} + +// RegisterValidation adds a validation with the given tag +// +// NOTES: +// - if the key already exists, the previous validation function will be replaced. 
+// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterValidation(tag string, fn Func, callValidationEvenIfNull ...bool) error { + return v.RegisterValidationCtx(tag, wrapFunc(fn), callValidationEvenIfNull...) +} + +// RegisterValidationCtx does the same as RegisterValidation on accepts a FuncCtx validation +// allowing context.Context validation support. +func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx, callValidationEvenIfNull ...bool) error { + var nilCheckable bool + if len(callValidationEvenIfNull) > 0 { + nilCheckable = callValidationEvenIfNull[0] + } + return v.registerValidation(tag, fn, false, nilCheckable) +} + +// RegisterAlias registers a mapping of a single validation tag that +// defines a common or complex set of validation(s) to simplify adding validation +// to structs. +// +// NOTE: this function is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterAlias(alias, tags string) { + _, ok := restrictedTags[alias] + + if ok || strings.ContainsAny(alias, restrictedTagChars) { + panic(fmt.Sprintf(restrictedAliasErr, alias)) + } + + v.aliases[alias] = tags +} + +// RegisterStructValidation registers a StructLevelFunc against a number of types. +// +// NOTE: +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) { + v.RegisterStructValidationCtx(wrapStructLevelFunc(fn), types...) +} + +// RegisterStructValidationCtx registers a StructLevelFuncCtx against a number of types and allows passing +// of contextual validation information via context.Context. 
+// +// NOTE: +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...interface{}) { + if v.structLevelFuncs == nil { + v.structLevelFuncs = make(map[reflect.Type]StructLevelFuncCtx) + } + + for _, t := range types { + tv := reflect.ValueOf(t) + if tv.Kind() == reflect.Ptr { + t = reflect.Indirect(tv).Interface() + } + + v.structLevelFuncs[reflect.TypeOf(t)] = fn + } +} + +// RegisterStructValidationMapRules registers validate map rules. +// Be aware that map validation rules supersede those defined on a/the struct if present. +// +// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidationMapRules(rules map[string]string, types ...interface{}) { + if v.rules == nil { + v.rules = make(map[reflect.Type]map[string]string) + } + + deepCopyRules := make(map[string]string) + for i, rule := range rules { + deepCopyRules[i] = rule + } + + for _, t := range types { + typ := reflect.TypeOf(t) + + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + if typ.Kind() != reflect.Struct { + continue + } + v.rules[typ] = deepCopyRules + } +} + +// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types +// +// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) { + if v.customFuncs == nil { + v.customFuncs = make(map[reflect.Type]CustomTypeFunc) + } + + for _, t := range types { + v.customFuncs[reflect.TypeOf(t)] = fn + } + + v.hasCustomFuncs = true +} + +// RegisterTranslation registers translations against the provided tag. 
+func (v *Validate) RegisterTranslation(tag string, trans ut.Translator, registerFn RegisterTranslationsFunc, translationFn TranslationFunc) (err error) { + if v.transTagFunc == nil { + v.transTagFunc = make(map[ut.Translator]map[string]TranslationFunc) + } + + if err = registerFn(trans); err != nil { + return + } + + m, ok := v.transTagFunc[trans] + if !ok { + m = make(map[string]TranslationFunc) + v.transTagFunc[trans] = m + } + + m[tag] = translationFn + + return +} + +// Struct validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) Struct(s interface{}) error { + return v.StructCtx(context.Background(), s) +} + +// StructCtx validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified +// and also allows passing of context.Context for contextual validation information. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = false + // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept + + vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructFiltered validates a structs exposed fields, that pass the FilterFunc check and automatically validates +// nested structs, unless otherwise specified. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructFiltered(s interface{}, fn FilterFunc) error { + return v.StructFilteredCtx(context.Background(), s, fn) +} + +// StructFilteredCtx validates a structs exposed fields, that pass the FilterFunc check and automatically validates +// nested structs, unless otherwise specified and also allows passing of contextual validation information via +// context.Context +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn FilterFunc) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = fn + // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept + + vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructPartial validates the fields passed in only, ignoring all others. +// Fields may be provided in a namespaced fashion relative to the struct provided +// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructPartial(s interface{}, fields ...string) error { + return v.StructPartialCtx(context.Background(), s, fields...) +} + +// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual +// validation information via context.Context +// Fields may be provided in a namespaced fashion relative to the struct provided +// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields ...string) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = nil + vd.hasExcludes = false + vd.includeExclude = make(map[string]struct{}) + + typ := val.Type() + name := typ.Name() + + for _, k := range fields { + flds := strings.Split(k, namespaceSeparator) + if len(flds) > 0 { + vd.misc = append(vd.misc[0:0], name...) + // Don't append empty name for unnamed structs + if len(vd.misc) != 0 { + vd.misc = append(vd.misc, '.') + } + + for _, s := range flds { + idx := strings.Index(s, leftBracket) + + if idx != -1 { + for idx != -1 { + vd.misc = append(vd.misc, s[:idx]...) + vd.includeExclude[string(vd.misc)] = struct{}{} + + idx2 := strings.Index(s, rightBracket) + idx2++ + vd.misc = append(vd.misc, s[idx:idx2]...) + vd.includeExclude[string(vd.misc)] = struct{}{} + s = s[idx2:] + idx = strings.Index(s, leftBracket) + } + } else { + vd.misc = append(vd.misc, s...) + vd.includeExclude[string(vd.misc)] = struct{}{} + } + + vd.misc = append(vd.misc, '.') + } + } + } + + vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructExcept validates all fields except the ones passed in. +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. 
err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructExcept(s interface{}, fields ...string) error { + return v.StructExceptCtx(context.Background(), s, fields...) +} + +// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual +// validation information via context.Context +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ...string) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = nil + vd.hasExcludes = true + vd.includeExclude = make(map[string]struct{}) + + typ := val.Type() + name := typ.Name() + + for _, key := range fields { + vd.misc = vd.misc[0:0] + + if len(name) > 0 { + vd.misc = append(vd.misc, name...) + vd.misc = append(vd.misc, '.') + } + + vd.misc = append(vd.misc, key...) + vd.includeExclude[string(vd.misc)] = struct{}{} + } + + vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// Var validates a single variable using tag style validation. +// eg. +// var i int +// validate.Var(i, "gt=1,lt=10") +// +// WARNING: a struct can be passed for validation eg. 
time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) Var(field interface{}, tag string) error { + return v.VarCtx(context.Background(), field, tag) +} + +// VarCtx validates a single variable using tag style validation and allows passing of contextual +// validation information via context.Context. +// eg. +// var i int +// validate.Var(i, "gt=1,lt=10") +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (err error) { + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + + ctag := v.fetchCacheTag(tag) + + val := reflect.ValueOf(field) + vd := v.pool.Get().(*validate) + vd.top = val + vd.isPartial = false + vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + v.pool.Put(vd) + return +} + +// VarWithValue validates a single variable, against another variable/field's value using tag style validation +// eg. +// s1 := "abcd" +// s2 := "abcd" +// validate.VarWithValue(s1, s2, "eqcsfield") // returns true +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string) error { + return v.VarWithValueCtx(context.Background(), field, other, tag) +} + +// VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and +// allows passing of contextual validation information via context.Context. +// eg. +// s1 := "abcd" +// s2 := "abcd" +// validate.VarWithValue(s1, s2, "eqcsfield") // returns true +// +// WARNING: a struct can be passed for validation eg. 
time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithValueCtx(ctx context.Context, field interface{}, other interface{}, tag string) (err error) { + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + ctag := v.fetchCacheTag(tag) + otherVal := reflect.ValueOf(other) + vd := v.pool.Get().(*validate) + vd.top = otherVal + vd.isPartial = false + vd.traverseField(ctx, otherVal, reflect.ValueOf(field), vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + v.pool.Put(vd) + return +} + +// VarWithKey validates a single variable with a key to be included in the returned error using tag style validation +// eg. +// var s string +// validate.VarWithKey("email_address", s, "required,email") +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithKey(key string, field interface{}, tag string) error { + return v.VarWithKeyCtx(context.Background(), key, field, tag) +} + +// VarWithKeyCtx validates a single variable with a key to be included in the returned error using tag style validation +// and allows passing of contextual validation information via context.Context. +// eg. +// var s string +// validate.VarWithKeyCtx("email_address", s, "required,email") +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithKeyCtx(ctx context.Context, key string, field interface{}, tag string) (err error) { + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + + ctag := v.fetchCacheTag(tag) + + cField := &cField{ + name: key, + altName: key, + namesEqual: true, + } + + val := reflect.ValueOf(field) + vd := v.pool.Get().(*validate) + vd.top = val + vd.isPartial = false + vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], cField, ctag) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + v.pool.Put(vd) + return +} + +func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilCheckable bool) error { + if len(tag) == 0 { + return errors.New("function Key cannot be empty") + } + + if fn == nil { + return errors.New("function cannot be empty") + } + + _, ok := restrictedTags[tag] + if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) { + panic(fmt.Sprintf(restrictedTagErr, tag)) + } + v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidationOnNil: nilCheckable} + return nil +} diff --git a/vendor/github.com/leodido/go-urn/.gitignore b/vendor/github.com/leodido/go-urn/.gitignore new file mode 100644 index 0000000000..427454f8f1 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/.gitignore @@ -0,0 +1,13 @@ +*.exe +*.dll +*.so +*.dylib + +*.test + +*.out +*.txt + +vendor/ +/removecomments +/snake2camel \ No newline at end of file diff --git a/vendor/github.com/leodido/go-urn/LICENSE b/vendor/github.com/leodido/go-urn/LICENSE new file mode 100644 index 0000000000..8c3504a5a9 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Leonardo Di Donato + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, 
including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/leodido/go-urn/README.md b/vendor/github.com/leodido/go-urn/README.md new file mode 100644 index 0000000000..619475bfbb --- /dev/null +++ b/vendor/github.com/leodido/go-urn/README.md @@ -0,0 +1,153 @@ +[![Build](https://img.shields.io/circleci/build/github/leodido/go-urn?style=for-the-badge)](https://app.circleci.com/pipelines/github/leodido/go-urn) [![Coverage](https://img.shields.io/codecov/c/github/leodido/go-urn.svg?style=for-the-badge)](https://codecov.io/gh/leodido/go-urn) [![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=for-the-badge)](https://godoc.org/github.com/leodido/go-urn) + +**A parser for URNs**. + +> As seen on [RFC 2141](https://datatracker.ietf.org/doc/html/rfc2141), [RFC 7643](https://datatracker.ietf.org/doc/html/rfc7643#section-10), and on [RFC 8141](https://datatracker.ietf.org/doc/html/rfc8141). + +[API documentation](https://godoc.org/github.com/leodido/go-urn). + +Starting with version 1.3 this library also supports [RFC 7643 SCIM URNs](https://datatracker.ietf.org/doc/html/rfc7643#section-10). 
+ +Starting with version 1.4 this library also supports [RFC 8141 URNs (2017)](https://datatracker.ietf.org/doc/html/rfc8141). + +## Installation + +``` +go get github.com/leodido/go-urn +``` + +## Features + +1. RFC 2141 URNs parsing (default) +2. RFC 8141 URNs parsing (supersedes RFC 2141) +3. RFC 7643 SCIM URNs parsing +4. Normalization as per RFCs +5. Lexical equivalence as per RFCs +6. Precise, fine-grained errors + +## Performances + +This implementation results to be really fast. + +Usually below 400 ns on my machine[1](#mymachine). + +Notice it also performs, while parsing: + +1. fine-grained and informative erroring +2. specific-string normalization + +``` +ok/00/urn:a:b______________________________________/-10 51372006 109.0 ns/op 275 B/op 3 allocs/op +ok/01/URN:foo:a123,456_____________________________/-10 36024072 160.8 ns/op 296 B/op 6 allocs/op +ok/02/urn:foo:a123%2C456___________________________/-10 31901007 188.4 ns/op 320 B/op 7 allocs/op +ok/03/urn:ietf:params:scim:schemas:core:2.0:User___/-10 22736756 266.6 ns/op 376 B/op 6 allocs/op +ok/04/urn:ietf:params:scim:schemas:extension:enterp/-10 18291859 335.2 ns/op 408 B/op 6 allocs/op +ok/05/urn:ietf:params:scim:schemas:extension:enterp/-10 15283087 379.4 ns/op 440 B/op 6 allocs/op +ok/06/urn:burnout:nss______________________________/-10 39407593 155.1 ns/op 288 B/op 6 allocs/op +ok/07/urn:abcdefghilmnopqrstuvzabcdefghilm:x_______/-10 27832718 211.4 ns/op 307 B/op 4 allocs/op +ok/08/urn:urnurnurn:urn____________________________/-10 33269596 168.1 ns/op 293 B/op 6 allocs/op +ok/09/urn:ciao:!!*_________________________________/-10 41100675 148.8 ns/op 288 B/op 6 allocs/op +ok/10/urn:ciao:=@__________________________________/-10 37214253 149.7 ns/op 284 B/op 6 allocs/op +ok/11/urn:ciao:@!=%2C(xyz)+a,b.*@g=$_'_____________/-10 26534240 229.8 ns/op 336 B/op 7 allocs/op +ok/12/URN:x:abc%1Dz%2F%3az_________________________/-10 28166396 211.8 ns/op 336 B/op 7 allocs/op 
+no/13/URN:---xxx:x_________________________________/-10 23635159 255.6 ns/op 419 B/op 5 allocs/op +no/14/urn::colon:nss_______________________________/-10 23594779 258.4 ns/op 419 B/op 5 allocs/op +no/15/URN:@,:x_____________________________________/-10 23742535 261.5 ns/op 419 B/op 5 allocs/op +no/16/URN:URN:NSS__________________________________/-10 27432714 223.3 ns/op 371 B/op 5 allocs/op +no/17/urn:UrN:NSS__________________________________/-10 26922117 224.9 ns/op 371 B/op 5 allocs/op +no/18/urn:a:%______________________________________/-10 24926733 224.6 ns/op 371 B/op 5 allocs/op +no/19/urn:urn:NSS__________________________________/-10 27652641 220.7 ns/op 371 B/op 5 allocs/op +``` + +* [1]: Apple M1 Pro + + +## Example + +For more examples take a look at the [examples file](examples_test.go). + + +```go +package main + +import ( + "fmt" + "github.com/leodido/go-urn" +) + +func main() { + var uid = "URN:foo:a123,456" + + // Parse the input string as a RFC 2141 URN only + u, e := urn.NewMachine().Parse(uid) + if e != nil { + fmt.Errorf(err) + + return + } + + fmt.Println(u.ID) + fmt.Println(u.SS) + + // Output: + // foo + // a123,456 +} +``` + +```go +package main + +import ( + "fmt" + "github.com/leodido/go-urn" +) + +func main() { + var uid = "URN:foo:a123,456" + + // Parse the input string as a RFC 2141 URN only + u, ok := urn.Parse([]byte(uid)) + if !ok { + panic("error parsing urn") + } + + fmt.Println(u.ID) + fmt.Println(u.SS) + + // Output: + // foo + // a123,456 +} +``` + +```go +package main + +import ( + "fmt" + "github.com/leodido/go-urn" +) + +func main() { + input := "urn:ietf:params:scim:api:messages:2.0:ListResponse" + + // Parsing the input string as a RFC 7643 SCIM URN + u, ok := urn.Parse([]byte(input), urn.WithParsingMode(urn.RFC7643Only)) + if !ok { + panic("error parsing urn") + } + + fmt.Println(u.IsSCIM()) + scim := u.SCIM() + fmt.Println(scim.Type.String()) + fmt.Println(scim.Name) + fmt.Println(scim.Other) + + // Output: + // true + 
// api + // messages + // 2.0:ListResponse +} +``` \ No newline at end of file diff --git a/vendor/github.com/leodido/go-urn/kind.go b/vendor/github.com/leodido/go-urn/kind.go new file mode 100644 index 0000000000..f5e140f0a4 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/kind.go @@ -0,0 +1,10 @@ +package urn + +type Kind int + +const ( + NONE Kind = iota + RFC2141 + RFC7643 + RFC8141 +) diff --git a/vendor/github.com/leodido/go-urn/machine.go b/vendor/github.com/leodido/go-urn/machine.go new file mode 100644 index 0000000000..aec1ba69cb --- /dev/null +++ b/vendor/github.com/leodido/go-urn/machine.go @@ -0,0 +1,5046 @@ +package urn + +import ( + "fmt" + + scimschema "github.com/leodido/go-urn/scim/schema" +) + +var ( + errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" + errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its beginning) [col %d]" + errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" + errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" + errHex = "expecting the percent encoded chars to be well-formed (%%alnum{2}) [col %d]" + errSCIMNamespace = "expecing the SCIM namespace identifier (ietf:params:scim) [col %d]" + errSCIMType = "expecting a correct SCIM type (schemas, api, param) [col %d]" + errSCIMName = "expecting one or more alnum char in the SCIM name part [col %d]" + errSCIMOther = "expecting a well-formed other SCIM part [col %d]" + errSCIMOtherIncomplete = "expecting a not empty SCIM other part after colon [col %d]" + err8141InformalID = "informal URN namespace must be in the form urn-[1-9][0-9] [col %d]" + err8141SpecificString = "expecting the specific string to contain alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] 
not in first position) chars [col %d]" + err8141Identifier = "expecting the indentifier to be a string with (length 2 to 32 chars) containing alnum (or dashes) not starting or ending with a dash [col %d]" + err8141RComponentStart = "expecting only one r-component (starting with the ?+ sequence) [col %d]" + err8141QComponentStart = "expecting only one q-component (starting with the ?= sequence) [col %d]" + err8141MalformedRComp = "expecting a non-empty r-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]" + err8141MalformedQComp = "expecting a non-empty q-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]" +) +var _toStateActions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 33, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, +} + +var _eofActions []byte = []byte{ + 0, 1, 1, 1, 1, 4, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 8, 9, + 9, 4, 4, 11, 1, 1, 1, 1, + 12, 12, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 12, + 12, 14, 14, 14, 14, 16, 18, 20, + 20, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 1, 1, 1, 1, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 24, 24, 25, 25, 0, 26, 28, + 28, 29, 29, 30, 30, 26, 26, 31, + 31, 22, 22, 22, 22, 22, 22, 22, + 
22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 21, + 21, 22, 22, 22, 34, 34, 35, 37, + 37, 38, 40, 41, 41, 38, 42, 42, + 42, 44, 42, 48, 48, 48, 50, 44, + 50, 0, +} + +const start int = 1 +const firstFinal int = 172 + +const enScimOnly int = 44 +const enRfc8141Only int = 83 +const enFail int = 193 +const enMain int = 1 + +// Machine is the interface representing the FSM +type Machine interface { + Error() error + Parse(input []byte) (*URN, error) + WithParsingMode(ParsingMode) +} + +type machine struct { + data []byte + cs int + p, pe, eof, pb int + err error + startParsingAt int + parsingMode ParsingMode + parsingModeSet bool +} + +// NewMachine creates a new FSM able to parse RFC 2141 strings. +func NewMachine(options ...Option) Machine { + m := &machine{ + parsingModeSet: false, + } + + for _, o := range options { + o(m) + } + // Set default parsing mode + if !m.parsingModeSet { + m.WithParsingMode(DefaultParsingMode) + } + + return m +} + +// Err returns the error that occurred on the last call to Parse. +// +// If the result is nil, then the line was parsed successfully. +func (m *machine) Error() error { + return m.err +} + +func (m *machine) text() []byte { + return m.data[m.pb:m.p] +} + +// Parse parses the input byte array as a RFC 2141 or RFC7643 string. 
+func (m *machine) Parse(input []byte) (*URN, error) { + m.data = input + m.p = 0 + m.pb = 0 + m.pe = len(input) + m.eof = len(input) + m.err = nil + m.cs = m.startParsingAt + output := &URN{ + tolower: []int{}, + } + { + if (m.p) == (m.pe) { + goto _testEof + } + if m.cs == 0 { + goto _out + } + _resume: + switch m.cs { + case 1: + switch (m.data)[(m.p)] { + case 85: + goto tr1 + case 117: + goto tr1 + } + goto tr0 + case 0: + goto _out + case 2: + switch (m.data)[(m.p)] { + case 82: + goto tr2 + case 114: + goto tr2 + } + goto tr0 + case 3: + switch (m.data)[(m.p)] { + case 78: + goto tr3 + case 110: + goto tr3 + } + goto tr0 + case 4: + if (m.data)[(m.p)] == 58 { + goto tr4 + } + goto tr0 + case 5: + switch (m.data)[(m.p)] { + case 85: + goto tr7 + case 117: + goto tr7 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr6 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr6 + } + default: + goto tr6 + } + goto tr5 + case 6: + switch (m.data)[(m.p)] { + case 45: + goto tr9 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr9 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr9 + } + default: + goto tr9 + } + goto tr8 + case 7: + switch (m.data)[(m.p)] { + case 45: + goto tr11 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr11 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr11 + } + default: + goto tr11 + } + goto tr8 + case 8: + switch (m.data)[(m.p)] { + case 45: + goto tr12 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr12 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr12 + } + 
default: + goto tr12 + } + goto tr8 + case 9: + switch (m.data)[(m.p)] { + case 45: + goto tr13 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr13 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr13 + } + default: + goto tr13 + } + goto tr8 + case 10: + switch (m.data)[(m.p)] { + case 45: + goto tr14 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr14 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr14 + } + default: + goto tr14 + } + goto tr8 + case 11: + switch (m.data)[(m.p)] { + case 45: + goto tr15 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr15 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr15 + } + default: + goto tr15 + } + goto tr8 + case 12: + switch (m.data)[(m.p)] { + case 45: + goto tr16 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr16 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr16 + } + default: + goto tr16 + } + goto tr8 + case 13: + switch (m.data)[(m.p)] { + case 45: + goto tr17 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr17 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr17 + } + default: + goto tr17 + } + goto tr8 + case 14: + switch (m.data)[(m.p)] { + case 45: + goto tr18 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr18 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + 
goto tr18 + } + default: + goto tr18 + } + goto tr8 + case 15: + switch (m.data)[(m.p)] { + case 45: + goto tr19 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr19 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr19 + } + default: + goto tr19 + } + goto tr8 + case 16: + switch (m.data)[(m.p)] { + case 45: + goto tr20 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr20 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr20 + } + default: + goto tr20 + } + goto tr8 + case 17: + switch (m.data)[(m.p)] { + case 45: + goto tr21 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr21 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr21 + } + default: + goto tr21 + } + goto tr8 + case 18: + switch (m.data)[(m.p)] { + case 45: + goto tr22 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr22 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr22 + } + default: + goto tr22 + } + goto tr8 + case 19: + switch (m.data)[(m.p)] { + case 45: + goto tr23 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr23 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr23 + } + default: + goto tr23 + } + goto tr8 + case 20: + switch (m.data)[(m.p)] { + case 45: + goto tr24 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr24 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && 
(m.data)[(m.p)] <= 122 { + goto tr24 + } + default: + goto tr24 + } + goto tr8 + case 21: + switch (m.data)[(m.p)] { + case 45: + goto tr25 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr25 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr25 + } + default: + goto tr25 + } + goto tr8 + case 22: + switch (m.data)[(m.p)] { + case 45: + goto tr26 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr26 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr26 + } + default: + goto tr26 + } + goto tr8 + case 23: + switch (m.data)[(m.p)] { + case 45: + goto tr27 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr27 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr27 + } + default: + goto tr27 + } + goto tr8 + case 24: + switch (m.data)[(m.p)] { + case 45: + goto tr28 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr28 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr28 + } + default: + goto tr28 + } + goto tr8 + case 25: + switch (m.data)[(m.p)] { + case 45: + goto tr29 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr29 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr29 + } + default: + goto tr29 + } + goto tr8 + case 26: + switch (m.data)[(m.p)] { + case 45: + goto tr30 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr30 + } + case (m.data)[(m.p)] > 90: + if 97 <= 
(m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr30 + } + default: + goto tr30 + } + goto tr8 + case 27: + switch (m.data)[(m.p)] { + case 45: + goto tr31 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr31 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr31 + } + default: + goto tr31 + } + goto tr8 + case 28: + switch (m.data)[(m.p)] { + case 45: + goto tr32 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr32 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr32 + } + default: + goto tr32 + } + goto tr8 + case 29: + switch (m.data)[(m.p)] { + case 45: + goto tr33 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr33 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr33 + } + default: + goto tr33 + } + goto tr8 + case 30: + switch (m.data)[(m.p)] { + case 45: + goto tr34 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr34 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr34 + } + default: + goto tr34 + } + goto tr8 + case 31: + switch (m.data)[(m.p)] { + case 45: + goto tr35 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr35 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr35 + } + default: + goto tr35 + } + goto tr8 + case 32: + switch (m.data)[(m.p)] { + case 45: + goto tr36 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr36 + } + case (m.data)[(m.p)] 
> 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr36 + } + default: + goto tr36 + } + goto tr8 + case 33: + switch (m.data)[(m.p)] { + case 45: + goto tr37 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr37 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr37 + } + default: + goto tr37 + } + goto tr8 + case 34: + switch (m.data)[(m.p)] { + case 45: + goto tr38 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr38 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr38 + } + default: + goto tr38 + } + goto tr8 + case 35: + switch (m.data)[(m.p)] { + case 45: + goto tr39 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr39 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr39 + } + default: + goto tr39 + } + goto tr8 + case 36: + switch (m.data)[(m.p)] { + case 45: + goto tr40 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr40 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr40 + } + default: + goto tr40 + } + goto tr8 + case 37: + if (m.data)[(m.p)] == 58 { + goto tr10 + } + goto tr8 + case 38: + switch (m.data)[(m.p)] { + case 33: + goto tr42 + case 36: + goto tr42 + case 37: + goto tr43 + case 61: + goto tr42 + case 95: + goto tr42 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr42 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr42 + } + case (m.data)[(m.p)] >= 64: + goto tr42 + } + default: + goto 
tr42 + } + goto tr41 + case 172: + switch (m.data)[(m.p)] { + case 33: + goto tr212 + case 36: + goto tr212 + case 37: + goto tr213 + case 61: + goto tr212 + case 95: + goto tr212 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr212 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr212 + } + case (m.data)[(m.p)] >= 64: + goto tr212 + } + default: + goto tr212 + } + goto tr41 + case 39: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr45 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr45 + } + default: + goto tr46 + } + goto tr44 + case 40: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr47 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr47 + } + default: + goto tr48 + } + goto tr44 + case 173: + switch (m.data)[(m.p)] { + case 33: + goto tr212 + case 36: + goto tr212 + case 37: + goto tr213 + case 61: + goto tr212 + case 95: + goto tr212 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr212 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr212 + } + case (m.data)[(m.p)] >= 64: + goto tr212 + } + default: + goto tr212 + } + goto tr44 + case 41: + switch (m.data)[(m.p)] { + case 45: + goto tr9 + case 58: + goto tr10 + case 82: + goto tr49 + case 114: + goto tr49 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr9 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr9 + } + default: + goto tr9 + } + goto tr5 + case 42: + switch (m.data)[(m.p)] { + case 45: + goto tr11 + case 58: + 
goto tr10 + case 78: + goto tr50 + case 110: + goto tr50 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr11 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr11 + } + default: + goto tr11 + } + goto tr5 + case 43: + if (m.data)[(m.p)] == 45 { + goto tr12 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr12 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr12 + } + default: + goto tr12 + } + goto tr51 + case 44: + switch (m.data)[(m.p)] { + case 85: + goto tr52 + case 117: + goto tr52 + } + goto tr0 + case 45: + switch (m.data)[(m.p)] { + case 82: + goto tr53 + case 114: + goto tr53 + } + goto tr0 + case 46: + switch (m.data)[(m.p)] { + case 78: + goto tr54 + case 110: + goto tr54 + } + goto tr0 + case 47: + if (m.data)[(m.p)] == 58 { + goto tr55 + } + goto tr0 + case 48: + if (m.data)[(m.p)] == 105 { + goto tr57 + } + goto tr56 + case 49: + if (m.data)[(m.p)] == 101 { + goto tr58 + } + goto tr56 + case 50: + if (m.data)[(m.p)] == 116 { + goto tr59 + } + goto tr56 + case 51: + if (m.data)[(m.p)] == 102 { + goto tr60 + } + goto tr56 + case 52: + if (m.data)[(m.p)] == 58 { + goto tr61 + } + goto tr56 + case 53: + if (m.data)[(m.p)] == 112 { + goto tr62 + } + goto tr56 + case 54: + if (m.data)[(m.p)] == 97 { + goto tr63 + } + goto tr56 + case 55: + if (m.data)[(m.p)] == 114 { + goto tr64 + } + goto tr56 + case 56: + if (m.data)[(m.p)] == 97 { + goto tr65 + } + goto tr56 + case 57: + if (m.data)[(m.p)] == 109 { + goto tr66 + } + goto tr56 + case 58: + if (m.data)[(m.p)] == 115 { + goto tr67 + } + goto tr56 + case 59: + if (m.data)[(m.p)] == 58 { + goto tr68 + } + goto tr56 + case 60: + if (m.data)[(m.p)] == 115 { + goto tr69 + } + goto tr56 + case 61: + if (m.data)[(m.p)] == 99 { + goto tr70 + } + goto tr56 + case 62: + if (m.data)[(m.p)] == 105 { + goto tr71 + } 
+ goto tr56 + case 63: + if (m.data)[(m.p)] == 109 { + goto tr72 + } + goto tr56 + case 64: + if (m.data)[(m.p)] == 58 { + goto tr73 + } + goto tr56 + case 65: + switch (m.data)[(m.p)] { + case 97: + goto tr75 + case 112: + goto tr76 + case 115: + goto tr77 + } + goto tr74 + case 66: + if (m.data)[(m.p)] == 112 { + goto tr78 + } + goto tr74 + case 67: + if (m.data)[(m.p)] == 105 { + goto tr79 + } + goto tr74 + case 68: + if (m.data)[(m.p)] == 58 { + goto tr80 + } + goto tr74 + case 69: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr82 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr82 + } + default: + goto tr82 + } + goto tr81 + case 174: + if (m.data)[(m.p)] == 58 { + goto tr215 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr214 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr214 + } + default: + goto tr214 + } + goto tr81 + case 70: + switch (m.data)[(m.p)] { + case 33: + goto tr84 + case 36: + goto tr84 + case 37: + goto tr85 + case 61: + goto tr84 + case 95: + goto tr84 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr84 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr84 + } + case (m.data)[(m.p)] >= 64: + goto tr84 + } + default: + goto tr84 + } + goto tr83 + case 175: + switch (m.data)[(m.p)] { + case 33: + goto tr216 + case 36: + goto tr216 + case 37: + goto tr217 + case 61: + goto tr216 + case 95: + goto tr216 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr216 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr216 + } + case (m.data)[(m.p)] >= 64: + goto tr216 
+ } + default: + goto tr216 + } + goto tr83 + case 71: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr87 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr87 + } + default: + goto tr88 + } + goto tr86 + case 72: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr89 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr89 + } + default: + goto tr90 + } + goto tr86 + case 176: + switch (m.data)[(m.p)] { + case 33: + goto tr216 + case 36: + goto tr216 + case 37: + goto tr217 + case 61: + goto tr216 + case 95: + goto tr216 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr216 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr216 + } + case (m.data)[(m.p)] >= 64: + goto tr216 + } + default: + goto tr216 + } + goto tr86 + case 73: + if (m.data)[(m.p)] == 97 { + goto tr91 + } + goto tr74 + case 74: + if (m.data)[(m.p)] == 114 { + goto tr92 + } + goto tr74 + case 75: + if (m.data)[(m.p)] == 97 { + goto tr93 + } + goto tr74 + case 76: + if (m.data)[(m.p)] == 109 { + goto tr79 + } + goto tr74 + case 77: + if (m.data)[(m.p)] == 99 { + goto tr94 + } + goto tr74 + case 78: + if (m.data)[(m.p)] == 104 { + goto tr95 + } + goto tr74 + case 79: + if (m.data)[(m.p)] == 101 { + goto tr96 + } + goto tr74 + case 80: + if (m.data)[(m.p)] == 109 { + goto tr97 + } + goto tr74 + case 81: + if (m.data)[(m.p)] == 97 { + goto tr98 + } + goto tr74 + case 82: + if (m.data)[(m.p)] == 115 { + goto tr79 + } + goto tr74 + case 83: + switch (m.data)[(m.p)] { + case 85: + goto tr99 + case 117: + goto tr99 + } + goto tr0 + case 84: + switch (m.data)[(m.p)] { + case 82: + goto tr100 + case 114: + goto tr100 + } + goto tr0 + case 85: + switch (m.data)[(m.p)] { + case 
78: + goto tr101 + case 110: + goto tr101 + } + goto tr0 + case 86: + if (m.data)[(m.p)] == 58 { + goto tr102 + } + goto tr0 + case 87: + switch (m.data)[(m.p)] { + case 85: + goto tr105 + case 117: + goto tr105 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr104 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr104 + } + default: + goto tr104 + } + goto tr103 + case 88: + if (m.data)[(m.p)] == 45 { + goto tr107 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr108 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr108 + } + default: + goto tr108 + } + goto tr106 + case 89: + if (m.data)[(m.p)] == 45 { + goto tr109 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr110 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr110 + } + default: + goto tr110 + } + goto tr106 + case 90: + if (m.data)[(m.p)] == 45 { + goto tr111 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr112 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr112 + } + default: + goto tr112 + } + goto tr106 + case 91: + if (m.data)[(m.p)] == 45 { + goto tr113 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr114 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr114 + } + default: + goto tr114 + } + goto tr106 + case 92: + if (m.data)[(m.p)] == 45 { + goto tr115 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr116 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr116 + } + default: + goto 
tr116 + } + goto tr106 + case 93: + if (m.data)[(m.p)] == 45 { + goto tr117 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr118 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr118 + } + default: + goto tr118 + } + goto tr106 + case 94: + if (m.data)[(m.p)] == 45 { + goto tr119 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr120 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr120 + } + default: + goto tr120 + } + goto tr106 + case 95: + if (m.data)[(m.p)] == 45 { + goto tr121 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr122 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr122 + } + default: + goto tr122 + } + goto tr106 + case 96: + if (m.data)[(m.p)] == 45 { + goto tr123 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr124 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr124 + } + default: + goto tr124 + } + goto tr106 + case 97: + if (m.data)[(m.p)] == 45 { + goto tr125 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr126 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr126 + } + default: + goto tr126 + } + goto tr106 + case 98: + if (m.data)[(m.p)] == 45 { + goto tr127 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr128 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr128 + } + default: + goto tr128 + } + goto tr106 + case 99: + if (m.data)[(m.p)] == 45 { + goto tr129 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= 
(m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr130 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr130 + } + default: + goto tr130 + } + goto tr106 + case 100: + if (m.data)[(m.p)] == 45 { + goto tr131 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr132 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr132 + } + default: + goto tr132 + } + goto tr106 + case 101: + if (m.data)[(m.p)] == 45 { + goto tr133 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr134 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr134 + } + default: + goto tr134 + } + goto tr106 + case 102: + if (m.data)[(m.p)] == 45 { + goto tr135 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr136 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr136 + } + default: + goto tr136 + } + goto tr106 + case 103: + if (m.data)[(m.p)] == 45 { + goto tr137 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr138 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr138 + } + default: + goto tr138 + } + goto tr106 + case 104: + if (m.data)[(m.p)] == 45 { + goto tr139 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr140 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr140 + } + default: + goto tr140 + } + goto tr106 + case 105: + if (m.data)[(m.p)] == 45 { + goto tr141 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr142 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && 
(m.data)[(m.p)] <= 122 { + goto tr142 + } + default: + goto tr142 + } + goto tr106 + case 106: + if (m.data)[(m.p)] == 45 { + goto tr143 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr144 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr144 + } + default: + goto tr144 + } + goto tr106 + case 107: + if (m.data)[(m.p)] == 45 { + goto tr145 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr146 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr146 + } + default: + goto tr146 + } + goto tr106 + case 108: + if (m.data)[(m.p)] == 45 { + goto tr147 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr148 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr148 + } + default: + goto tr148 + } + goto tr106 + case 109: + if (m.data)[(m.p)] == 45 { + goto tr149 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr150 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr150 + } + default: + goto tr150 + } + goto tr106 + case 110: + if (m.data)[(m.p)] == 45 { + goto tr151 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr152 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr152 + } + default: + goto tr152 + } + goto tr106 + case 111: + if (m.data)[(m.p)] == 45 { + goto tr153 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr154 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr154 + } + default: + goto tr154 + } + goto tr106 + case 112: + if (m.data)[(m.p)] == 45 { + goto 
tr155 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr156 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr156 + } + default: + goto tr156 + } + goto tr106 + case 113: + if (m.data)[(m.p)] == 45 { + goto tr157 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr158 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr158 + } + default: + goto tr158 + } + goto tr106 + case 114: + if (m.data)[(m.p)] == 45 { + goto tr159 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr160 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr160 + } + default: + goto tr160 + } + goto tr106 + case 115: + if (m.data)[(m.p)] == 45 { + goto tr161 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr162 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr162 + } + default: + goto tr162 + } + goto tr106 + case 116: + if (m.data)[(m.p)] == 45 { + goto tr163 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr164 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr164 + } + default: + goto tr164 + } + goto tr106 + case 117: + if (m.data)[(m.p)] == 45 { + goto tr165 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr166 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr166 + } + default: + goto tr166 + } + goto tr106 + case 118: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr167 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && 
(m.data)[(m.p)] <= 122 { + goto tr167 + } + default: + goto tr167 + } + goto tr106 + case 119: + if (m.data)[(m.p)] == 58 { + goto tr168 + } + goto tr106 + case 120: + switch (m.data)[(m.p)] { + case 33: + goto tr170 + case 37: + goto tr171 + case 61: + goto tr170 + case 95: + goto tr170 + case 126: + goto tr170 + } + switch { + case (m.data)[(m.p)] < 48: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr170 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr170 + } + case (m.data)[(m.p)] >= 64: + goto tr170 + } + default: + goto tr170 + } + goto tr169 + case 177: + switch (m.data)[(m.p)] { + case 33: + goto tr218 + case 35: + goto tr219 + case 37: + goto tr220 + case 61: + goto tr218 + case 63: + goto tr221 + case 95: + goto tr218 + case 126: + goto tr218 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr218 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr218 + } + default: + goto tr218 + } + goto tr169 + case 178: + switch (m.data)[(m.p)] { + case 33: + goto tr222 + case 37: + goto tr223 + case 61: + goto tr222 + case 95: + goto tr222 + case 126: + goto tr222 + } + switch { + case (m.data)[(m.p)] < 63: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr222 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr222 + } + default: + goto tr222 + } + goto tr183 + case 179: + switch (m.data)[(m.p)] { + case 33: + goto tr224 + case 37: + goto tr225 + case 61: + goto tr224 + case 95: + goto tr224 + case 126: + goto tr224 + } + switch { + case (m.data)[(m.p)] < 63: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr224 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr224 + } + default: + goto tr224 + } + goto tr183 + case 121: + switch { + case 
(m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr173 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr173 + } + default: + goto tr174 + } + goto tr172 + case 122: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr175 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr175 + } + default: + goto tr176 + } + goto tr172 + case 180: + switch (m.data)[(m.p)] { + case 33: + goto tr224 + case 37: + goto tr225 + case 61: + goto tr224 + case 95: + goto tr224 + case 126: + goto tr224 + } + switch { + case (m.data)[(m.p)] < 63: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr224 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr224 + } + default: + goto tr224 + } + goto tr172 + case 123: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr178 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr178 + } + default: + goto tr179 + } + goto tr177 + case 124: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr180 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr180 + } + default: + goto tr181 + } + goto tr177 + case 181: + switch (m.data)[(m.p)] { + case 33: + goto tr218 + case 35: + goto tr219 + case 37: + goto tr220 + case 61: + goto tr218 + case 63: + goto tr221 + case 95: + goto tr218 + case 126: + goto tr218 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr218 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr218 + } + default: + goto tr218 + } + goto tr177 + case 125: + switch (m.data)[(m.p)] { + case 43: + goto tr182 + case 61: + goto tr184 
+ } + goto tr183 + case 126: + switch (m.data)[(m.p)] { + case 33: + goto tr186 + case 37: + goto tr187 + case 61: + goto tr186 + case 63: + goto tr188 + case 95: + goto tr186 + case 126: + goto tr186 + } + switch { + case (m.data)[(m.p)] < 48: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr186 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr186 + } + case (m.data)[(m.p)] >= 64: + goto tr186 + } + default: + goto tr186 + } + goto tr185 + case 182: + switch (m.data)[(m.p)] { + case 33: + goto tr226 + case 35: + goto tr227 + case 37: + goto tr228 + case 61: + goto tr226 + case 63: + goto tr229 + case 95: + goto tr226 + case 126: + goto tr226 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr226 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr226 + } + default: + goto tr226 + } + goto tr185 + case 127: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr190 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr190 + } + default: + goto tr191 + } + goto tr189 + case 128: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr192 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr192 + } + default: + goto tr193 + } + goto tr189 + case 183: + switch (m.data)[(m.p)] { + case 33: + goto tr226 + case 35: + goto tr227 + case 37: + goto tr228 + case 61: + goto tr226 + case 63: + goto tr229 + case 95: + goto tr226 + case 126: + goto tr226 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr226 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr226 + } + default: + goto tr226 + } + goto 
tr189 + case 184: + switch (m.data)[(m.p)] { + case 33: + goto tr226 + case 35: + goto tr227 + case 37: + goto tr228 + case 43: + goto tr230 + case 61: + goto tr231 + case 63: + goto tr229 + case 95: + goto tr226 + case 126: + goto tr226 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr226 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr226 + } + default: + goto tr226 + } + goto tr185 + case 185: + switch (m.data)[(m.p)] { + case 33: + goto tr232 + case 35: + goto tr233 + case 37: + goto tr234 + case 47: + goto tr226 + case 61: + goto tr232 + case 63: + goto tr235 + case 95: + goto tr232 + case 126: + goto tr232 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr232 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr232 + } + default: + goto tr232 + } + goto tr185 + case 186: + switch (m.data)[(m.p)] { + case 33: + goto tr204 + case 35: + goto tr227 + case 37: + goto tr237 + case 47: + goto tr226 + case 61: + goto tr204 + case 63: + goto tr229 + case 95: + goto tr204 + case 126: + goto tr204 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr204 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr204 + } + default: + goto tr204 + } + goto tr236 + case 187: + switch (m.data)[(m.p)] { + case 33: + goto tr238 + case 35: + goto tr239 + case 37: + goto tr240 + case 61: + goto tr238 + case 63: + goto tr241 + case 95: + goto tr238 + case 126: + goto tr238 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr238 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr238 + } + default: + goto tr238 + } + goto tr203 + case 129: + switch { + case (m.data)[(m.p)] < 65: + if 48 
<= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr195 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr195 + } + default: + goto tr196 + } + goto tr194 + case 130: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr197 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr197 + } + default: + goto tr198 + } + goto tr194 + case 188: + switch (m.data)[(m.p)] { + case 33: + goto tr238 + case 35: + goto tr239 + case 37: + goto tr240 + case 61: + goto tr238 + case 63: + goto tr241 + case 95: + goto tr238 + case 126: + goto tr238 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr238 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr238 + } + default: + goto tr238 + } + goto tr194 + case 189: + switch (m.data)[(m.p)] { + case 33: + goto tr238 + case 35: + goto tr239 + case 37: + goto tr240 + case 61: + goto tr242 + case 63: + goto tr241 + case 95: + goto tr238 + case 126: + goto tr238 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr238 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr238 + } + default: + goto tr238 + } + goto tr203 + case 190: + switch (m.data)[(m.p)] { + case 33: + goto tr243 + case 35: + goto tr244 + case 37: + goto tr245 + case 47: + goto tr238 + case 61: + goto tr243 + case 63: + goto tr246 + case 95: + goto tr243 + case 126: + goto tr243 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr243 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr243 + } + default: + goto tr243 + } + goto tr203 + case 131: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + 
goto tr200 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr200 + } + default: + goto tr201 + } + goto tr199 + case 132: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr197 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr197 + } + default: + goto tr198 + } + goto tr199 + case 133: + if (m.data)[(m.p)] == 43 { + goto tr202 + } + goto tr185 + case 191: + switch (m.data)[(m.p)] { + case 33: + goto tr232 + case 35: + goto tr233 + case 37: + goto tr234 + case 61: + goto tr232 + case 63: + goto tr247 + case 95: + goto tr232 + case 126: + goto tr232 + } + switch { + case (m.data)[(m.p)] < 48: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr232 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr232 + } + case (m.data)[(m.p)] >= 64: + goto tr232 + } + default: + goto tr232 + } + goto tr185 + case 134: + switch (m.data)[(m.p)] { + case 43: + goto tr202 + case 61: + goto tr184 + } + goto tr185 + case 135: + switch (m.data)[(m.p)] { + case 33: + goto tr204 + case 37: + goto tr205 + case 61: + goto tr204 + case 63: + goto tr206 + case 95: + goto tr204 + case 126: + goto tr204 + } + switch { + case (m.data)[(m.p)] < 48: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr204 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr204 + } + case (m.data)[(m.p)] >= 64: + goto tr204 + } + default: + goto tr204 + } + goto tr203 + case 136: + if (m.data)[(m.p)] == 61 { + goto tr207 + } + goto tr203 + case 192: + switch (m.data)[(m.p)] { + case 33: + goto tr243 + case 35: + goto tr244 + case 37: + goto tr245 + case 61: + goto tr243 + case 63: + goto tr248 + case 95: + goto tr243 + case 126: + goto tr243 + } + switch { + case (m.data)[(m.p)] < 48: 
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr243 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr243 + } + case (m.data)[(m.p)] >= 64: + goto tr243 + } + default: + goto tr243 + } + goto tr203 + case 137: + if (m.data)[(m.p)] == 58 { + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr167 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr167 + } + default: + goto tr167 + } + goto tr106 + case 138: + switch (m.data)[(m.p)] { + case 45: + goto tr165 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr166 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr166 + } + default: + goto tr166 + } + goto tr106 + case 139: + switch (m.data)[(m.p)] { + case 45: + goto tr163 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr164 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr164 + } + default: + goto tr164 + } + goto tr106 + case 140: + switch (m.data)[(m.p)] { + case 45: + goto tr161 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr162 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr162 + } + default: + goto tr162 + } + goto tr106 + case 141: + switch (m.data)[(m.p)] { + case 45: + goto tr159 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr160 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr160 + } + default: + goto tr160 + } + goto tr106 + case 142: 
+ switch (m.data)[(m.p)] { + case 45: + goto tr157 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr158 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr158 + } + default: + goto tr158 + } + goto tr106 + case 143: + switch (m.data)[(m.p)] { + case 45: + goto tr155 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr156 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr156 + } + default: + goto tr156 + } + goto tr106 + case 144: + switch (m.data)[(m.p)] { + case 45: + goto tr153 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr154 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr154 + } + default: + goto tr154 + } + goto tr106 + case 145: + switch (m.data)[(m.p)] { + case 45: + goto tr151 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr152 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr152 + } + default: + goto tr152 + } + goto tr106 + case 146: + switch (m.data)[(m.p)] { + case 45: + goto tr149 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr150 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr150 + } + default: + goto tr150 + } + goto tr106 + case 147: + switch (m.data)[(m.p)] { + case 45: + goto tr147 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr148 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + 
goto tr148 + } + default: + goto tr148 + } + goto tr106 + case 148: + switch (m.data)[(m.p)] { + case 45: + goto tr145 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr146 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr146 + } + default: + goto tr146 + } + goto tr106 + case 149: + switch (m.data)[(m.p)] { + case 45: + goto tr143 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr144 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr144 + } + default: + goto tr144 + } + goto tr106 + case 150: + switch (m.data)[(m.p)] { + case 45: + goto tr141 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr142 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr142 + } + default: + goto tr142 + } + goto tr106 + case 151: + switch (m.data)[(m.p)] { + case 45: + goto tr139 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr140 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr140 + } + default: + goto tr140 + } + goto tr106 + case 152: + switch (m.data)[(m.p)] { + case 45: + goto tr137 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr138 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr138 + } + default: + goto tr138 + } + goto tr106 + case 153: + switch (m.data)[(m.p)] { + case 45: + goto tr135 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr136 + } + case 
(m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr136 + } + default: + goto tr136 + } + goto tr106 + case 154: + switch (m.data)[(m.p)] { + case 45: + goto tr133 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr134 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr134 + } + default: + goto tr134 + } + goto tr106 + case 155: + switch (m.data)[(m.p)] { + case 45: + goto tr131 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr132 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr132 + } + default: + goto tr132 + } + goto tr106 + case 156: + switch (m.data)[(m.p)] { + case 45: + goto tr129 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr130 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr130 + } + default: + goto tr130 + } + goto tr106 + case 157: + switch (m.data)[(m.p)] { + case 45: + goto tr127 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr128 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr128 + } + default: + goto tr128 + } + goto tr106 + case 158: + switch (m.data)[(m.p)] { + case 45: + goto tr125 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr126 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr126 + } + default: + goto tr126 + } + goto tr106 + case 159: + switch (m.data)[(m.p)] { + case 45: + goto tr123 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= 
(m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr124 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr124 + } + default: + goto tr124 + } + goto tr106 + case 160: + switch (m.data)[(m.p)] { + case 45: + goto tr121 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr122 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr122 + } + default: + goto tr122 + } + goto tr106 + case 161: + switch (m.data)[(m.p)] { + case 45: + goto tr119 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr120 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr120 + } + default: + goto tr120 + } + goto tr106 + case 162: + switch (m.data)[(m.p)] { + case 45: + goto tr117 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr118 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr118 + } + default: + goto tr118 + } + goto tr106 + case 163: + switch (m.data)[(m.p)] { + case 45: + goto tr115 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr116 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr116 + } + default: + goto tr116 + } + goto tr106 + case 164: + switch (m.data)[(m.p)] { + case 45: + goto tr113 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr114 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr114 + } + default: + goto tr114 + } + goto tr106 + case 165: + switch (m.data)[(m.p)] { + case 45: + goto tr111 + case 58: + 
goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr112 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr112 + } + default: + goto tr112 + } + goto tr106 + case 166: + switch (m.data)[(m.p)] { + case 45: + goto tr109 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr110 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr110 + } + default: + goto tr110 + } + goto tr106 + case 167: + switch (m.data)[(m.p)] { + case 45: + goto tr107 + case 82: + goto tr208 + case 114: + goto tr208 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr108 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr108 + } + default: + goto tr108 + } + goto tr103 + case 168: + switch (m.data)[(m.p)] { + case 45: + goto tr109 + case 58: + goto tr168 + case 78: + goto tr209 + case 110: + goto tr209 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr110 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr110 + } + default: + goto tr110 + } + goto tr103 + case 169: + switch (m.data)[(m.p)] { + case 45: + goto tr210 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr112 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr112 + } + default: + goto tr112 + } + goto tr106 + case 170: + switch (m.data)[(m.p)] { + case 45: + goto tr113 + case 48: + goto tr211 + } + switch { + case (m.data)[(m.p)] < 65: + if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr114 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 
122 { + goto tr211 + } + default: + goto tr211 + } + goto tr106 + case 171: + if (m.data)[(m.p)] == 45 { + goto tr115 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr116 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr116 + } + default: + goto tr116 + } + goto tr106 + case 193: + switch (m.data)[(m.p)] { + case 10: + goto tr183 + case 13: + goto tr183 + } + goto tr249 + } + + tr183: + m.cs = 0 + goto _again + tr0: + m.cs = 0 + goto f0 + tr5: + m.cs = 0 + goto f3 + tr8: + m.cs = 0 + goto f5 + tr41: + m.cs = 0 + goto f7 + tr44: + m.cs = 0 + goto f8 + tr51: + m.cs = 0 + goto f10 + tr56: + m.cs = 0 + goto f11 + tr74: + m.cs = 0 + goto f13 + tr81: + m.cs = 0 + goto f15 + tr83: + m.cs = 0 + goto f17 + tr86: + m.cs = 0 + goto f19 + tr103: + m.cs = 0 + goto f20 + tr106: + m.cs = 0 + goto f21 + tr169: + m.cs = 0 + goto f22 + tr172: + m.cs = 0 + goto f23 + tr177: + m.cs = 0 + goto f24 + tr185: + m.cs = 0 + goto f25 + tr189: + m.cs = 0 + goto f27 + tr194: + m.cs = 0 + goto f28 + tr199: + m.cs = 0 + goto f29 + tr203: + m.cs = 0 + goto f30 + tr236: + m.cs = 0 + goto f46 + tr1: + m.cs = 2 + goto f1 + tr2: + m.cs = 3 + goto _again + tr3: + m.cs = 4 + goto _again + tr4: + m.cs = 5 + goto f2 + tr6: + m.cs = 6 + goto f4 + tr9: + m.cs = 7 + goto _again + tr11: + m.cs = 8 + goto _again + tr12: + m.cs = 9 + goto _again + tr13: + m.cs = 10 + goto _again + tr14: + m.cs = 11 + goto _again + tr15: + m.cs = 12 + goto _again + tr16: + m.cs = 13 + goto _again + tr17: + m.cs = 14 + goto _again + tr18: + m.cs = 15 + goto _again + tr19: + m.cs = 16 + goto _again + tr20: + m.cs = 17 + goto _again + tr21: + m.cs = 18 + goto _again + tr22: + m.cs = 19 + goto _again + tr23: + m.cs = 20 + goto _again + tr24: + m.cs = 21 + goto _again + tr25: + m.cs = 22 + goto _again + tr26: + m.cs = 23 + goto _again + tr27: + m.cs = 24 + goto _again + tr28: + m.cs = 25 + goto _again + tr29: + m.cs = 26 + goto 
_again + tr30: + m.cs = 27 + goto _again + tr31: + m.cs = 28 + goto _again + tr32: + m.cs = 29 + goto _again + tr33: + m.cs = 30 + goto _again + tr34: + m.cs = 31 + goto _again + tr35: + m.cs = 32 + goto _again + tr36: + m.cs = 33 + goto _again + tr37: + m.cs = 34 + goto _again + tr38: + m.cs = 35 + goto _again + tr39: + m.cs = 36 + goto _again + tr40: + m.cs = 37 + goto _again + tr10: + m.cs = 38 + goto f6 + tr213: + m.cs = 39 + goto _again + tr43: + m.cs = 39 + goto f4 + tr45: + m.cs = 40 + goto _again + tr46: + m.cs = 40 + goto f9 + tr7: + m.cs = 41 + goto f1 + tr49: + m.cs = 42 + goto _again + tr50: + m.cs = 43 + goto _again + tr52: + m.cs = 45 + goto f1 + tr53: + m.cs = 46 + goto _again + tr54: + m.cs = 47 + goto _again + tr55: + m.cs = 48 + goto f2 + tr57: + m.cs = 49 + goto f4 + tr58: + m.cs = 50 + goto _again + tr59: + m.cs = 51 + goto _again + tr60: + m.cs = 52 + goto _again + tr61: + m.cs = 53 + goto _again + tr62: + m.cs = 54 + goto _again + tr63: + m.cs = 55 + goto _again + tr64: + m.cs = 56 + goto _again + tr65: + m.cs = 57 + goto _again + tr66: + m.cs = 58 + goto _again + tr67: + m.cs = 59 + goto _again + tr68: + m.cs = 60 + goto _again + tr69: + m.cs = 61 + goto _again + tr70: + m.cs = 62 + goto _again + tr71: + m.cs = 63 + goto _again + tr72: + m.cs = 64 + goto _again + tr73: + m.cs = 65 + goto f12 + tr75: + m.cs = 66 + goto f4 + tr78: + m.cs = 67 + goto _again + tr79: + m.cs = 68 + goto _again + tr80: + m.cs = 69 + goto f14 + tr215: + m.cs = 70 + goto f35 + tr217: + m.cs = 71 + goto _again + tr85: + m.cs = 71 + goto f18 + tr87: + m.cs = 72 + goto _again + tr88: + m.cs = 72 + goto f9 + tr76: + m.cs = 73 + goto f4 + tr91: + m.cs = 74 + goto _again + tr92: + m.cs = 75 + goto _again + tr93: + m.cs = 76 + goto _again + tr77: + m.cs = 77 + goto f4 + tr94: + m.cs = 78 + goto _again + tr95: + m.cs = 79 + goto _again + tr96: + m.cs = 80 + goto _again + tr97: + m.cs = 81 + goto _again + tr98: + m.cs = 82 + goto _again + tr99: + m.cs = 84 + goto f1 + tr100: + 
m.cs = 85 + goto _again + tr101: + m.cs = 86 + goto _again + tr102: + m.cs = 87 + goto f2 + tr104: + m.cs = 88 + goto f4 + tr107: + m.cs = 89 + goto _again + tr109: + m.cs = 90 + goto _again + tr111: + m.cs = 91 + goto _again + tr113: + m.cs = 92 + goto _again + tr115: + m.cs = 93 + goto _again + tr117: + m.cs = 94 + goto _again + tr119: + m.cs = 95 + goto _again + tr121: + m.cs = 96 + goto _again + tr123: + m.cs = 97 + goto _again + tr125: + m.cs = 98 + goto _again + tr127: + m.cs = 99 + goto _again + tr129: + m.cs = 100 + goto _again + tr131: + m.cs = 101 + goto _again + tr133: + m.cs = 102 + goto _again + tr135: + m.cs = 103 + goto _again + tr137: + m.cs = 104 + goto _again + tr139: + m.cs = 105 + goto _again + tr141: + m.cs = 106 + goto _again + tr143: + m.cs = 107 + goto _again + tr145: + m.cs = 108 + goto _again + tr147: + m.cs = 109 + goto _again + tr149: + m.cs = 110 + goto _again + tr151: + m.cs = 111 + goto _again + tr153: + m.cs = 112 + goto _again + tr155: + m.cs = 113 + goto _again + tr157: + m.cs = 114 + goto _again + tr159: + m.cs = 115 + goto _again + tr161: + m.cs = 116 + goto _again + tr163: + m.cs = 117 + goto _again + tr165: + m.cs = 118 + goto _again + tr167: + m.cs = 119 + goto _again + tr168: + m.cs = 120 + goto f6 + tr225: + m.cs = 121 + goto _again + tr223: + m.cs = 121 + goto f4 + tr173: + m.cs = 122 + goto _again + tr174: + m.cs = 122 + goto f9 + tr220: + m.cs = 123 + goto _again + tr171: + m.cs = 123 + goto f4 + tr178: + m.cs = 124 + goto _again + tr179: + m.cs = 124 + goto f9 + tr221: + m.cs = 125 + goto f38 + tr182: + m.cs = 126 + goto _again + tr228: + m.cs = 127 + goto _again + tr187: + m.cs = 127 + goto f26 + tr234: + m.cs = 127 + goto f44 + tr190: + m.cs = 128 + goto _again + tr191: + m.cs = 128 + goto f9 + tr240: + m.cs = 129 + goto _again + tr205: + m.cs = 129 + goto f31 + tr245: + m.cs = 129 + goto f50 + tr195: + m.cs = 130 + goto _again + tr196: + m.cs = 130 + goto f9 + tr237: + m.cs = 131 + goto f31 + tr200: + m.cs = 132 + 
goto _again + tr201: + m.cs = 132 + goto f9 + tr188: + m.cs = 133 + goto f26 + tr247: + m.cs = 134 + goto f45 + tr184: + m.cs = 135 + goto _again + tr206: + m.cs = 136 + goto f31 + tr248: + m.cs = 136 + goto f50 + tr166: + m.cs = 137 + goto _again + tr164: + m.cs = 138 + goto _again + tr162: + m.cs = 139 + goto _again + tr160: + m.cs = 140 + goto _again + tr158: + m.cs = 141 + goto _again + tr156: + m.cs = 142 + goto _again + tr154: + m.cs = 143 + goto _again + tr152: + m.cs = 144 + goto _again + tr150: + m.cs = 145 + goto _again + tr148: + m.cs = 146 + goto _again + tr146: + m.cs = 147 + goto _again + tr144: + m.cs = 148 + goto _again + tr142: + m.cs = 149 + goto _again + tr140: + m.cs = 150 + goto _again + tr138: + m.cs = 151 + goto _again + tr136: + m.cs = 152 + goto _again + tr134: + m.cs = 153 + goto _again + tr132: + m.cs = 154 + goto _again + tr130: + m.cs = 155 + goto _again + tr128: + m.cs = 156 + goto _again + tr126: + m.cs = 157 + goto _again + tr124: + m.cs = 158 + goto _again + tr122: + m.cs = 159 + goto _again + tr120: + m.cs = 160 + goto _again + tr118: + m.cs = 161 + goto _again + tr116: + m.cs = 162 + goto _again + tr114: + m.cs = 163 + goto _again + tr112: + m.cs = 164 + goto _again + tr110: + m.cs = 165 + goto _again + tr108: + m.cs = 166 + goto _again + tr105: + m.cs = 167 + goto f1 + tr208: + m.cs = 168 + goto _again + tr209: + m.cs = 169 + goto _again + tr210: + m.cs = 170 + goto f2 + tr211: + m.cs = 171 + goto _again + tr212: + m.cs = 172 + goto _again + tr42: + m.cs = 172 + goto f4 + tr47: + m.cs = 173 + goto _again + tr48: + m.cs = 173 + goto f9 + tr214: + m.cs = 174 + goto _again + tr82: + m.cs = 174 + goto f16 + tr216: + m.cs = 175 + goto _again + tr84: + m.cs = 175 + goto f18 + tr89: + m.cs = 176 + goto _again + tr90: + m.cs = 176 + goto f9 + tr218: + m.cs = 177 + goto _again + tr170: + m.cs = 177 + goto f4 + tr219: + m.cs = 178 + goto f38 + tr227: + m.cs = 178 + goto f42 + tr233: + m.cs = 178 + goto f45 + tr239: + m.cs = 178 + goto f48 
+ tr244: + m.cs = 178 + goto f51 + tr224: + m.cs = 179 + goto _again + tr222: + m.cs = 179 + goto f4 + tr175: + m.cs = 180 + goto _again + tr176: + m.cs = 180 + goto f9 + tr180: + m.cs = 181 + goto _again + tr181: + m.cs = 181 + goto f9 + tr226: + m.cs = 182 + goto _again + tr186: + m.cs = 182 + goto f26 + tr232: + m.cs = 182 + goto f44 + tr192: + m.cs = 183 + goto _again + tr193: + m.cs = 183 + goto f9 + tr229: + m.cs = 184 + goto f42 + tr235: + m.cs = 184 + goto f45 + tr230: + m.cs = 185 + goto _again + tr231: + m.cs = 186 + goto _again + tr238: + m.cs = 187 + goto _again + tr204: + m.cs = 187 + goto f31 + tr243: + m.cs = 187 + goto f50 + tr197: + m.cs = 188 + goto _again + tr198: + m.cs = 188 + goto f9 + tr241: + m.cs = 189 + goto _again + tr246: + m.cs = 189 + goto f50 + tr242: + m.cs = 190 + goto _again + tr202: + m.cs = 191 + goto _again + tr207: + m.cs = 192 + goto _again + tr249: + m.cs = 193 + goto _again + + f4: + + m.pb = m.p + + goto _again + f9: + + // List of positions in the buffer to later lowercase + output.tolower = append(output.tolower, m.p-m.pb) + + goto _again + f2: + + output.prefix = string(m.text()) + + goto _again + f6: + + output.ID = string(m.text()) + + goto _again + f38: + + output.SS = string(m.text()) + // Iterate upper letters lowering them + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 + } + + goto _again + f0: + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f5: + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f7: + + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f23: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto 
_again + + } + + goto _again + f11: + + m.err = fmt.Errorf(errSCIMNamespace, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f13: + + m.err = fmt.Errorf(errSCIMType, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f15: + + m.err = fmt.Errorf(errSCIMName, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f17: + + if m.p == m.pe { + m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1) + } else { + m.err = fmt.Errorf(errSCIMOther, m.p) + } + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f14: + + output.scim.Type = scimschema.TypeFromString(string(m.text())) + + goto _again + f16: + + output.scim.pos = m.p + + goto _again + f35: + + output.scim.Name = string(m.data[output.scim.pos:m.p]) + + goto _again + f18: + + output.scim.pos = m.p + + goto _again + f22: + + m.err = fmt.Errorf(err8141SpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f21: + + m.err = fmt.Errorf(err8141Identifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f42: + + output.rComponent = string(m.text()) + + goto _again + f48: + + output.qComponent = string(m.text()) + + goto _again + f44: + + if output.rStart { + m.err = fmt.Errorf(err8141RComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.rStart = true + + goto _again + f50: + + if output.qStart { + m.err = fmt.Errorf(err8141QComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.qStart = true + + goto _again + f25: + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f30: + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f1: + + m.pb = m.p + + if m.parsingMode != RFC8141Only { + // Throw an error when: + // - we are entering here matching the the prefix in the namespace identifier part + // - looking ahead (3 chars) we find a colon + if pos := m.p + 3; pos < m.pe && m.data[pos] == 58 && output.prefix != "" 
{ + m.err = fmt.Errorf(errNoUrnWithinID, pos) + (m.p)-- + + m.cs = 193 + goto _again + + } + } + + goto _again + f12: + + output.ID = string(m.text()) + + output.scim = &SCIM{} + + goto _again + f3: + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f10: + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f8: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f19: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + if m.p == m.pe { + m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1) + } else { + m.err = fmt.Errorf(errSCIMOther, m.p) + } + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f24: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141SpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f27: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f28: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f20: + + m.err = fmt.Errorf(err8141Identifier, 
m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f26: + + if output.rStart { + m.err = fmt.Errorf(err8141RComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.rStart = true + + m.pb = m.p + + goto _again + f45: + + if output.rStart { + m.err = fmt.Errorf(err8141RComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.rStart = true + + output.rComponent = string(m.text()) + + goto _again + f31: + + if output.qStart { + m.err = fmt.Errorf(err8141QComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.qStart = true + + m.pb = m.p + + goto _again + f51: + + if output.qStart { + m.err = fmt.Errorf(err8141QComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.qStart = true + + output.qComponent = string(m.text()) + + goto _again + f46: + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f29: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + + _again: + switch _toStateActions[m.cs] { + case 33: + + (m.p)-- + + m.err = fmt.Errorf(err8141InformalID, m.p) + m.cs = 193 + goto _again + } + + if m.cs == 0 { + goto _out + } + if (m.p)++; (m.p) != (m.pe) { + goto _resume + } + _testEof: + { + } + if (m.p) == (m.eof) { + switch _eofActions[m.cs] { + case 1: + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 6: + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 8: + + m.err = 
fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 24: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + case 12: + + m.err = fmt.Errorf(errSCIMNamespace, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 14: + + m.err = fmt.Errorf(errSCIMType, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 16: + + m.err = fmt.Errorf(errSCIMName, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 18: + + if m.p == m.pe { + m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1) + } else { + m.err = fmt.Errorf(errSCIMOther, m.p) + } + (m.p)-- + + m.cs = 193 + goto _again + + case 23: + + m.err = fmt.Errorf(err8141SpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 22: + + m.err = fmt.Errorf(err8141Identifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 26: + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 31: + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 34: + + output.SS = string(m.text()) + // Iterate upper letters lowering them + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 + } + + output.kind = RFC2141 + + case 38: + + output.SS = string(m.text()) + // Iterate upper letters lowering them + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 + } + + output.kind = RFC8141 + + case 4: + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 11: + + m.err = 
fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 9: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 20: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + if m.p == m.pe { + m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1) + } else { + m.err = fmt.Errorf(errSCIMOther, m.p) + } + (m.p)-- + + m.cs = 193 + goto _again + + case 25: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141SpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 28: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 29: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 21: + + m.err = fmt.Errorf(err8141Identifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 42: + + output.rComponent = string(m.text()) + + output.kind = RFC8141 + + case 48: + + output.qComponent = string(m.text()) + + output.kind = RFC8141 + + case 41: + + output.fComponent = string(m.text()) + + output.kind = RFC8141 + + case 40: + + m.pb = m.p + + output.fComponent = string(m.text()) + + output.kind = RFC8141 
+ + case 30: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 35: + + output.scim.Name = string(m.data[output.scim.pos:m.p]) + + output.SS = string(m.text()) + // Iterate upper letters lowering them + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 + } + + output.kind = RFC7643 + + case 37: + + output.scim.Other = string(m.data[output.scim.pos:m.p]) + + output.SS = string(m.text()) + // Iterate upper letters lowering them + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 + } + + output.kind = RFC7643 + + case 44: + + if output.rStart { + m.err = fmt.Errorf(err8141RComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.rStart = true + + output.rComponent = string(m.text()) + + output.kind = RFC8141 + + case 50: + + if output.qStart { + m.err = fmt.Errorf(err8141QComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.qStart = true + + output.qComponent = string(m.text()) + + output.kind = RFC8141 + } + } + + _out: + { + } + } + + if m.cs < firstFinal || m.cs == enFail { + return nil, m.err + } + + return output, nil +} + +func (m *machine) WithParsingMode(x ParsingMode) { + m.parsingMode = x + switch m.parsingMode { + case RFC2141Only: + m.startParsingAt = enMain + case RFC8141Only: + m.startParsingAt = enRfc8141Only + case RFC7643Only: + m.startParsingAt = enScimOnly + } + m.parsingModeSet = 
true +} diff --git a/vendor/github.com/leodido/go-urn/machine.go.rl b/vendor/github.com/leodido/go-urn/machine.go.rl new file mode 100644 index 0000000000..0a17421998 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/machine.go.rl @@ -0,0 +1,386 @@ +package urn + +import ( + "fmt" + + scimschema "github.com/leodido/go-urn/scim/schema" +) + +var ( + errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" + errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its beginning) [col %d]" + errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" + errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" + errHex = "expecting the percent encoded chars to be well-formed (%%alnum{2}) [col %d]" + errSCIMNamespace = "expecing the SCIM namespace identifier (ietf:params:scim) [col %d]" + errSCIMType = "expecting a correct SCIM type (schemas, api, param) [col %d]" + errSCIMName = "expecting one or more alnum char in the SCIM name part [col %d]" + errSCIMOther = "expecting a well-formed other SCIM part [col %d]" + errSCIMOtherIncomplete = "expecting a not empty SCIM other part after colon [col %d]" + err8141InformalID = "informal URN namespace must be in the form urn-[1-9][0-9] [col %d]" + err8141SpecificString = "expecting the specific string to contain alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] 
not in first position) chars [col %d]" + err8141Identifier = "expecting the indentifier to be a string with (length 2 to 32 chars) containing alnum (or dashes) not starting or ending with a dash [col %d]" + err8141RComponentStart = "expecting only one r-component (starting with the ?+ sequence) [col %d]" + err8141QComponentStart = "expecting only one q-component (starting with the ?= sequence) [col %d]" + err8141MalformedRComp = "expecting a non-empty r-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]" + err8141MalformedQComp = "expecting a non-empty q-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]" +) + +%%{ +machine urn; + +# unsigned alphabet +alphtype uint8; + +action mark { + m.pb = m.p +} + +action tolower { + // List of positions in the buffer to later lowercase + output.tolower = append(output.tolower, m.p - m.pb) +} + +action set_pre { + output.prefix = string(m.text()) +} + +action throw_pre_urn_err { + if m.parsingMode != RFC8141Only { + // Throw an error when: + // - we are entering here matching the the prefix in the namespace identifier part + // - looking ahead (3 chars) we find a colon + if pos := m.p + 3; pos < m.pe && m.data[pos] == 58 && output.prefix != "" { + m.err = fmt.Errorf(errNoUrnWithinID, pos) + fhold; + fgoto fail; + } + } +} + +action set_nid { + output.ID = string(m.text()) +} + +action set_nss { + output.SS = string(m.text()) + // Iterate upper letters lowering them + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 + } +} + +action err_pre { + m.err = fmt.Errorf(errPrefix, m.p) + fhold; + fgoto fail; +} + +action err_nid { + m.err = fmt.Errorf(errIdentifier, m.p) + fhold; + fgoto fail; +} + +action err_nss { + m.err = 
fmt.Errorf(errSpecificString, m.p) + fhold; + fgoto fail; +} + +action err_urn { + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + fhold; + fgoto fail; +} + +action err_hex { + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + fhold; + fgoto fail; + } +} + +action base_type { + output.kind = RFC2141; +} + +pre = ([uU] @err(err_pre) [rR] @err(err_pre) [nN] @err(err_pre)) >mark >throw_pre_urn_err %set_pre; + +nid = (alnum >mark (alnum | '-'){0,31}) $err(err_nid) %set_nid; + +hex = '%' (digit | lower | upper >tolower){2} $err(err_hex); + +sss = (alnum | [()+,\-.:=@;$_!*']); + +nss = (sss | hex)+ $err(err_nss); + +nid_not_urn = (nid - pre %err(err_urn)); + +urn = pre ':' @err(err_pre) (nid_not_urn ':' nss >mark %set_nss) %eof(base_type); + +### SCIM BEG + +action err_scim_nid { + m.err = fmt.Errorf(errSCIMNamespace, m.p) + fhold; + fgoto fail; +} + +action err_scim_type { + m.err = fmt.Errorf(errSCIMType, m.p) + fhold; + fgoto fail; +} + +action err_scim_name { + m.err = fmt.Errorf(errSCIMName, m.p) + fhold; + fgoto fail; +} + +action err_scim_other { + if m.p == m.pe { + m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1) + } else { + m.err = fmt.Errorf(errSCIMOther, m.p) + } + fhold; + fgoto fail; +} + +action scim_type { + output.kind = RFC7643; +} + +action create_scim { + output.scim = &SCIM{}; +} + +action set_scim_type { + output.scim.Type = scimschema.TypeFromString(string(m.text())) +} + +action mark_scim_name { + output.scim.pos = m.p +} + +action set_scim_name { + output.scim.Name = string(m.data[output.scim.pos:m.p]) +} + +action mark_scim_other { + output.scim.pos = m.p +} + +action set_scim_other { + output.scim.Other = string(m.data[output.scim.pos:m.p]) +} + +scim_nid = 'ietf:params:scim' >mark %set_nid %create_scim $err(err_scim_nid); + +scim_other = ':' (sss | hex)+ >mark_scim_other %set_scim_other $err(err_scim_other); + +scim_name = (alnum)+ >mark_scim_name %set_scim_name $err(err_scim_name); + 
+scim_type = ('schemas' | 'api' | 'param') >mark %set_scim_type $err(err_scim_type); + +scim_only := pre ':' @err(err_pre) (scim_nid ':' scim_type ':' scim_name scim_other? %set_nss) %eof(scim_type); + +### SCIM END + +### 8141 BEG + +action err_nss_8141 { + m.err = fmt.Errorf(err8141SpecificString, m.p) + fhold; + fgoto fail; +} + +action err_nid_8141 { + m.err = fmt.Errorf(err8141Identifier, m.p) + fhold; + fgoto fail; +} + +action rfc8141_type { + output.kind = RFC8141; +} + +action set_r_component { + output.rComponent = string(m.text()) +} + +action set_q_component { + output.qComponent = string(m.text()) +} + +action set_f_component { + output.fComponent = string(m.text()) +} + +action informal_nid_match { + fhold; + m.err = fmt.Errorf(err8141InformalID, m.p); + fgoto fail; +} + +action mark_r_start { + if output.rStart { + m.err = fmt.Errorf(err8141RComponentStart, m.p) + fhold; + fgoto fail; + } + output.rStart = true +} + +action mark_q_start { + if output.qStart { + m.err = fmt.Errorf(err8141QComponentStart, m.p) + fhold; + fgoto fail; + } + output.qStart = true +} + +action err_malformed_r_component { + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + fhold; + fgoto fail; +} + +action err_malformed_q_component { + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + fhold; + fgoto fail; +} + +pchar = (sss | '~' | '&' | hex); + +component = pchar (pchar | '/' | '?')*; + +r_start = ('?+') %mark_r_start; + +r_component = r_start <: (r_start | component)+ $err(err_malformed_r_component) >mark %set_r_component; + +q_start = ('?=') %mark_q_start; + +q_component = q_start <: (q_start | component)+ $err(err_malformed_q_component) >mark %set_q_component; + +rq_components = (r_component :>> q_component? 
| q_component); + +fragment = (pchar | '/' | '?')*; + +f_component = '#' fragment >mark %set_f_component; + +nss_rfc8141 = (pchar >mark (pchar | '/')*) $err(err_nss_8141) %set_nss; + +nid_rfc8141 = (alnum >mark (alnum | '-'){0,30} alnum) $err(err_nid_8141) %set_nid; + +informal_id = pre ('-' [a-zA-z0] %to(informal_nid_match)); + +nid_rfc8141_not_urn = (nid_rfc8141 - informal_id?); + +rfc8141_only := pre ':' @err(err_pre) nid_rfc8141_not_urn ':' nss_rfc8141 rq_components? f_component? %eof(rfc8141_type); + +### 8141 END + +fail := (any - [\n\r])* @err{ fgoto main; }; + +main := urn; + +}%% + +%% write data noerror noprefix; + +// Machine is the interface representing the FSM +type Machine interface { + Error() error + Parse(input []byte) (*URN, error) + WithParsingMode(ParsingMode) +} + +type machine struct { + data []byte + cs int + p, pe, eof, pb int + err error + startParsingAt int + parsingMode ParsingMode + parsingModeSet bool +} + +// NewMachine creates a new FSM able to parse RFC 2141 strings. +func NewMachine(options ...Option) Machine { + m := &machine{ + parsingModeSet: false, + } + + for _, o := range options { + o(m) + } + // Set default parsing mode + if !m.parsingModeSet { + m.WithParsingMode(DefaultParsingMode) + } + + %% access m.; + %% variable p m.p; + %% variable pe m.pe; + %% variable eof m.eof; + %% variable data m.data; + + return m +} + +// Err returns the error that occurred on the last call to Parse. +// +// If the result is nil, then the line was parsed successfully. +func (m *machine) Error() error { + return m.err +} + +func (m *machine) text() []byte { + return m.data[m.pb:m.p] +} + +// Parse parses the input byte array as a RFC 2141 or RFC7643 string. 
+func (m *machine) Parse(input []byte) (*URN, error) { + m.data = input + m.p = 0 + m.pb = 0 + m.pe = len(input) + m.eof = len(input) + m.err = nil + m.cs = m.startParsingAt + output := &URN{ + tolower: []int{}, + } + + %% write exec; + + if m.cs < first_final || m.cs == en_fail { + return nil, m.err + } + + return output, nil +} + +func (m *machine) WithParsingMode(x ParsingMode) { + m.parsingMode = x + switch m.parsingMode { + case RFC2141Only: + m.startParsingAt = en_main + case RFC8141Only: + m.startParsingAt = en_rfc8141_only + case RFC7643Only: + m.startParsingAt = en_scim_only + } + m.parsingModeSet = true +} \ No newline at end of file diff --git a/vendor/github.com/leodido/go-urn/makefile b/vendor/github.com/leodido/go-urn/makefile new file mode 100644 index 0000000000..68d5dd0f1b --- /dev/null +++ b/vendor/github.com/leodido/go-urn/makefile @@ -0,0 +1,51 @@ +SHELL := /bin/bash +RAGEL := ragel +GOFMT := go fmt + +export GO_TEST=env GOTRACEBACK=all go test $(GO_ARGS) + +.PHONY: build +build: machine.go + +.PHONY: clean +clean: + @rm -rf docs + @rm -f machine.go + +.PHONY: images +images: docs/urn.png + +.PHONY: snake2camel +snake2camel: + @cd ./tools/snake2camel; go build -o ../../snake2camel . + +.PHONY: removecomments +removecomments: + @cd ./tools/removecomments; go build -o ../../removecomments . + +machine.go: machine.go.rl + +machine.go: snake2camel + +machine.go: removecomments + +machine.go: + $(RAGEL) -Z -G1 -e -o $@ $< + @./removecomments $@ + @./snake2camel $@ + $(GOFMT) $@ + +docs/urn.dot: machine.go.rl + @mkdir -p docs + $(RAGEL) -Z -e -Vp $< -o $@ + +docs/urn.png: docs/urn.dot + dot $< -Tpng -o $@ + +.PHONY: bench +bench: *_test.go machine.go + go test -bench=. -benchmem -benchtime=5s ./... + +.PHONY: tests +tests: *_test.go + $(GO_TEST) ./... 
diff --git a/vendor/github.com/leodido/go-urn/options.go b/vendor/github.com/leodido/go-urn/options.go new file mode 100644 index 0000000000..c543835a28 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/options.go @@ -0,0 +1,9 @@ +package urn + +type Option func(Machine) + +func WithParsingMode(mode ParsingMode) Option { + return func(m Machine) { + m.WithParsingMode(mode) + } +} diff --git a/vendor/github.com/leodido/go-urn/parsing_mode.go b/vendor/github.com/leodido/go-urn/parsing_mode.go new file mode 100644 index 0000000000..fce5aadc3c --- /dev/null +++ b/vendor/github.com/leodido/go-urn/parsing_mode.go @@ -0,0 +1,12 @@ +package urn + +type ParsingMode int + +const ( + Default ParsingMode = iota + RFC2141Only + RFC7643Only + RFC8141Only +) + +const DefaultParsingMode = RFC2141Only diff --git a/vendor/github.com/leodido/go-urn/scim.go b/vendor/github.com/leodido/go-urn/scim.go new file mode 100644 index 0000000000..f6b7aefbad --- /dev/null +++ b/vendor/github.com/leodido/go-urn/scim.go @@ -0,0 +1,48 @@ +package urn + +import ( + "encoding/json" + "fmt" + + scimschema "github.com/leodido/go-urn/scim/schema" +) + +const errInvalidSCIMURN = "invalid SCIM URN: %s" + +type SCIM struct { + Type scimschema.Type + Name string + Other string + pos int +} + +func (s SCIM) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +func (s *SCIM) UnmarshalJSON(bytes []byte) error { + var str string + if err := json.Unmarshal(bytes, &str); err != nil { + return err + } + // Parse as SCIM + value, ok := Parse([]byte(str), WithParsingMode(RFC7643Only)) + if !ok { + return fmt.Errorf(errInvalidSCIMURN, str) + } + if value.RFC() != RFC7643 { + return fmt.Errorf(errInvalidSCIMURN, str) + } + *s = *value.SCIM() + + return nil +} + +func (s *SCIM) String() string { + ret := fmt.Sprintf("urn:ietf:params:scim:%s:%s", s.Type.String(), s.Name) + if s.Other != "" { + ret += fmt.Sprintf(":%s", s.Other) + } + + return ret +} diff --git 
a/vendor/github.com/leodido/go-urn/scim/schema/type.go b/vendor/github.com/leodido/go-urn/scim/schema/type.go new file mode 100644 index 0000000000..134918230f --- /dev/null +++ b/vendor/github.com/leodido/go-urn/scim/schema/type.go @@ -0,0 +1,36 @@ +package scimschema + +type Type int + +const ( + Unsupported Type = iota + Schemas + API + Param +) + +func (t Type) String() string { + switch t { + case Schemas: + return "schemas" + case API: + return "api" + case Param: + return "param" + } + + return "" +} + +func TypeFromString(input string) Type { + switch input { + case "schemas": + return Schemas + case "api": + return API + case "param": + return Param + } + + return Unsupported +} diff --git a/vendor/github.com/leodido/go-urn/urn.go b/vendor/github.com/leodido/go-urn/urn.go new file mode 100644 index 0000000000..894d6258dc --- /dev/null +++ b/vendor/github.com/leodido/go-urn/urn.go @@ -0,0 +1,141 @@ +package urn + +import ( + "encoding/json" + "fmt" + "strings" +) + +const errInvalidURN = "invalid URN: %s" + +// URN represents an Uniform Resource Name. +// +// The general form represented is: +// +// urn:: +// +// Details at https://tools.ietf.org/html/rfc2141. +type URN struct { + prefix string // Static prefix. Equal to "urn" when empty. + ID string // Namespace identifier (NID) + SS string // Namespace specific string (NSS) + norm string // Normalized namespace specific string + kind Kind + scim *SCIM + rComponent string // RFC8141 + qComponent string // RFC8141 + fComponent string // RFC8141 + rStart bool // RFC8141 + qStart bool // RFC8141 + tolower []int +} + +// Normalize turns the receiving URN into its norm version. +// +// Which means: lowercase prefix, lowercase namespace identifier, and immutate namespace specific string chars (except tokens which are lowercased). 
+func (u *URN) Normalize() *URN { + return &URN{ + prefix: "urn", + ID: strings.ToLower(u.ID), + SS: u.norm, + // rComponent: u.rComponent, + // qComponent: u.qComponent, + // fComponent: u.fComponent, + } +} + +// Equal checks the lexical equivalence of the current URN with another one. +func (u *URN) Equal(x *URN) bool { + if x == nil { + return false + } + nu := u.Normalize() + nx := x.Normalize() + + return nu.prefix == nx.prefix && nu.ID == nx.ID && nu.SS == nx.SS +} + +// String reassembles the URN into a valid URN string. +// +// This requires both ID and SS fields to be non-empty. +// Otherwise it returns an empty string. +// +// Default URN prefix is "urn". +func (u *URN) String() string { + var res string + if u.ID != "" && u.SS != "" { + if u.prefix == "" { + res += "urn" + } + res += u.prefix + ":" + u.ID + ":" + u.SS + if u.rComponent != "" { + res += "?+" + u.rComponent + } + if u.qComponent != "" { + res += "?=" + u.qComponent + } + if u.fComponent != "" { + res += "#" + u.fComponent + } + } + + return res +} + +// Parse is responsible to create an URN instance from a byte array matching the correct URN syntax (RFC 2141). +func Parse(u []byte, options ...Option) (*URN, bool) { + urn, err := NewMachine(options...).Parse(u) + if err != nil { + return nil, false + } + + return urn, true +} + +// MarshalJSON marshals the URN to JSON string form (e.g. `"urn:oid:1.2.3.4"`). +func (u URN) MarshalJSON() ([]byte, error) { + return json.Marshal(u.String()) +} + +// UnmarshalJSON unmarshals a URN from JSON string form (e.g. `"urn:oid:1.2.3.4"`). 
+func (u *URN) UnmarshalJSON(bytes []byte) error { + var str string + if err := json.Unmarshal(bytes, &str); err != nil { + return err + } + if value, ok := Parse([]byte(str)); !ok { + return fmt.Errorf(errInvalidURN, str) + } else { + *u = *value + } + + return nil +} + +func (u *URN) IsSCIM() bool { + return u.kind == RFC7643 +} + +func (u *URN) SCIM() *SCIM { + if u.kind != RFC7643 { + return nil + } + + return u.scim +} + +func (u *URN) RFC() Kind { + return u.kind +} + +func (u *URN) FComponent() string { + return u.fComponent +} + +func (u *URN) QComponent() string { + return u.qComponent +} + +func (u *URN) RComponent() string { + return u.rComponent +} diff --git a/vendor/github.com/leodido/go-urn/urn8141.go b/vendor/github.com/leodido/go-urn/urn8141.go new file mode 100644 index 0000000000..da4dd062e3 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/urn8141.go @@ -0,0 +1,30 @@ +package urn + +import ( + "encoding/json" + "fmt" +) + +const errInvalidURN8141 = "invalid URN per RFC 8141: %s" + +type URN8141 struct { + *URN +} + +func (u URN8141) MarshalJSON() ([]byte, error) { + return json.Marshal(u.String()) +} + +func (u *URN8141) UnmarshalJSON(bytes []byte) error { + var str string + if err := json.Unmarshal(bytes, &str); err != nil { + return err + } + if value, ok := Parse([]byte(str), WithParsingMode(RFC8141Only)); !ok { + return fmt.Errorf(errInvalidURN8141, str) + } else { + *u = URN8141{value} + } + + return nil +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/LICENSE b/vendor/github.com/ovn-kubernetes/libovsdb/LICENSE new file mode 100644 index 0000000000..e06d208186 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/NOTICE b/vendor/github.com/ovn-kubernetes/libovsdb/NOTICE new file mode 100644 index 0000000000..156dcf39f8 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/NOTICE @@ -0,0 +1,13 @@ +libovsdb + +Copyright 2014-2015 Socketplane Inc. +Copyright 2015-2018 Docker Inc. + +This software consists of voluntary contributions made by many individuals. For +exact contribution history, see the commit history. + +Modifications Copyright 2018-2019 eBay Inc. + +This software contains modifications developed by eBay Inc. and voluntary contributions +from other individuals in a fork maintained at https://github.com/eBay/libovsdb +For details on these contributions, please consult the git history. 
diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/cache/cache.go b/vendor/github.com/ovn-kubernetes/libovsdb/cache/cache.go new file mode 100644 index 0000000000..4840aa2461 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/cache/cache.go @@ -0,0 +1,1281 @@ +package cache + +import ( + "bytes" + "crypto/sha256" + "encoding/gob" + "encoding/hex" + "fmt" + "reflect" + "sort" + "strings" + "sync" + + "github.com/go-logr/logr" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/updates" +) + +const ( + updateEvent = "update" + addEvent = "add" + deleteEvent = "delete" + bufferSize = 65536 + columnDelimiter = "," + keyDelimiter = "|" +) + +// ErrCacheInconsistent is an error that can occur when an operation +// would cause the cache to be inconsistent +type ErrCacheInconsistent struct { + details string +} + +// Error implements the error interface +func (e *ErrCacheInconsistent) Error() string { + msg := "cache inconsistent" + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +func NewErrCacheInconsistent(details string) *ErrCacheInconsistent { + return &ErrCacheInconsistent{ + details: details, + } +} + +// ErrIndexExists is returned when an item in the database cannot be inserted due to existing indexes +type ErrIndexExists struct { + Table string + Value any + Index string + New string + Existing []string +} + +func (e *ErrIndexExists) Error() string { + return fmt.Sprintf("cannot insert %s in the %s table. item %s has identical indexes. 
index: %s, value: %v", e.New, e.Table, e.Existing, e.Index, e.Value) +} + +func NewIndexExistsError(table string, value any, index string, n string, existing []string) *ErrIndexExists { + return &ErrIndexExists{ + table, value, index, n, existing, + } +} + +// map of unique values to uuids +type valueToUUIDs map[any]uuidset + +// map of column name(s) to unique values, to UUIDs +type columnToValue map[index]valueToUUIDs + +// index is the type used to implement multiple cache indexes +type index string + +// indexType is the type of index +type indexType uint + +const ( + schemaIndexType indexType = iota + clientIndexType +) + +// indexSpec contains details about an index +type indexSpec struct { + index index + columns []model.ColumnKey + indexType indexType +} + +func (s indexSpec) isClientIndex() bool { + return s.indexType == clientIndexType +} + +func (s indexSpec) isSchemaIndex() bool { + return s.indexType == schemaIndexType +} + +// newIndex builds a index from a list of columns +func newIndexFromColumns(columns ...string) index { + sort.Strings(columns) + return index(strings.Join(columns, columnDelimiter)) +} + +// newIndexFromColumnKeys builds a index from a list of column keys +func newIndexFromColumnKeys(columnsKeys ...model.ColumnKey) index { + // RFC 7047 says that Indexes is a [] and "Each is a set of + // columns whose values, taken together within any given row, must be + // unique within the table". 
We'll store the column names, separated by comma + // as we'll assume (RFC is not clear), that comma isn't valid in a + columns := make([]string, 0, len(columnsKeys)) + columnsMap := map[string]struct{}{} + for _, columnKey := range columnsKeys { + var column string + if columnKey.Key != nil { + column = fmt.Sprintf("%s%s%v", columnKey.Column, keyDelimiter, columnKey.Key) + } else { + column = columnKey.Column + } + if _, found := columnsMap[column]; !found { + columns = append(columns, column) + columnsMap[column] = struct{}{} + } + } + return newIndexFromColumns(columns...) +} + +// newColumnKeysFromColumns builds a list of column keys from a list of columns +func newColumnKeysFromColumns(columns ...string) []model.ColumnKey { + columnKeys := make([]model.ColumnKey, len(columns)) + for i, column := range columns { + columnKeys[i] = model.ColumnKey{Column: column} + } + return columnKeys +} + +// RowCache is a collections of Models hashed by UUID +type RowCache struct { + name string + dbModel model.DatabaseModel + dataType reflect.Type + cache map[string]model.Model + indexSpecs []indexSpec + indexes columnToValue + mutex sync.RWMutex +} + +// rowByUUID returns one model from the cache by UUID. Caller must hold the row +// cache lock. +func (r *RowCache) rowByUUID(uuid string) model.Model { + if row, ok := r.cache[uuid]; ok { + return model.Clone(row) + } + return nil +} + +// Row returns one model from the cache by UUID +func (r *RowCache) Row(uuid string) model.Model { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.rowByUUID(uuid) +} + +func (r *RowCache) HasRow(uuid string) bool { + r.mutex.RLock() + defer r.mutex.RUnlock() + _, found := r.cache[uuid] + return found +} + +// rowsByModels searches the cache to find all rows matching any of the provided +// models, either by UUID or indexes. An error is returned if the model schema +// has no UUID field, or if the provided models are not all the same type. 
+func (r *RowCache) rowsByModels(models []model.Model, useClientIndexes bool) (map[string]model.Model, error) { + r.mutex.RLock() + defer r.mutex.RUnlock() + + results := make(map[string]model.Model, len(models)) + for _, m := range models { + if reflect.TypeOf(m) != r.dataType { + return nil, fmt.Errorf("model type %s didn't match expected row type %s", reflect.TypeOf(m), r.dataType) + } + info, _ := r.dbModel.NewModelInfo(m) + field, err := info.FieldByColumn("_uuid") + if err != nil { + return nil, err + } + if uuid := field.(string); uuid != "" { + if _, ok := results[uuid]; !ok { + if row := r.rowByUUID(uuid); row != nil { + results[uuid] = row + continue + } + } + } + + // indexSpecs are ordered, schema indexes go first, then client indexes + for _, indexSpec := range r.indexSpecs { + if indexSpec.isClientIndex() && !useClientIndexes { + // Given the ordered indexSpecs, we can break here if we reach the + // first client index + break + } + val, err := valueFromIndex(info, indexSpec.columns) + if err != nil { + continue + } + vals := r.indexes[indexSpec.index] + if uuids, ok := vals[val]; ok { + for uuid := range uuids { + if _, ok := results[uuid]; !ok { + results[uuid] = r.rowByUUID(uuid) + } + } + // Break after handling the first found index + // to ensure we preserve index order preference + break + } + } + } + if len(results) == 0 { + return nil, nil + } + return results, nil +} + +// RowByModel searches the cache by UUID and schema indexes. UUID search is +// performed first. Then schema indexes are evaluated in turn by the same order +// with which they are defined in the schema. The model for the first matching +// index is returned along with its UUID. An empty string and nil is returned if +// no Model is found. 
+func (r *RowCache) RowByModel(m model.Model) (string, model.Model, error) { + models, err := r.rowsByModels([]model.Model{m}, false) + if err != nil { + return "", nil, err + } + for uuid, model := range models { + return uuid, model, nil + } + return "", nil, nil +} + +// RowsByModels searches the cache by UUID, schema indexes and client indexes. +// UUID search is performed first. Schema indexes are evaluated next in turn by +// the same order with which they are defined in the schema. Finally, client +// indexes are evaluated in turn by the same order with which they are defined +// in the client DB model. The models for the first matching index are returned, +// which might be more than 1 if they were found through a client index since in +// that case uniqueness is not enforced. Nil is returned if no Model is found. +func (r *RowCache) RowsByModels(models []model.Model) (map[string]model.Model, error) { + return r.rowsByModels(models, true) +} + +// Create writes the provided content to the cache +func (r *RowCache) Create(uuid string, m model.Model, checkIndexes bool) error { + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.cache[uuid]; ok { + return NewErrCacheInconsistent(fmt.Sprintf("cannot create row %s as it already exists", uuid)) + } + if reflect.TypeOf(m) != r.dataType { + return fmt.Errorf("expected data of type %s, but got %s", r.dataType.String(), reflect.TypeOf(m).String()) + } + info, err := r.dbModel.NewModelInfo(m) + if err != nil { + return err + } + addIndexes := r.newIndexes() + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + val, err := valueFromIndex(info, indexSpec.columns) + if err != nil { + return err + } + + uuidset := newUUIDSet(uuid) + + vals := r.indexes[index] + existing := vals[val] + if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) { + return NewIndexExistsError(r.name, val, string(index), uuid, existing.list()) + } + + addIndexes[index][val] = uuidset 
+ } + + // write indexes + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + for k, v := range addIndexes[index] { + if indexSpec.isSchemaIndex() { + r.indexes[index][k] = v + } else { + r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v) + } + } + } + + r.cache[uuid] = model.Clone(m) + return nil +} + +// Update updates the content in the cache and returns the original (pre-update) model +func (r *RowCache) Update(uuid string, m model.Model, checkIndexes bool) (model.Model, error) { + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.cache[uuid]; !ok { + return nil, NewErrCacheInconsistent(fmt.Sprintf("cannot update row %s as it does not exist in the cache", uuid)) + } + oldRow := model.Clone(r.cache[uuid]) + oldInfo, err := r.dbModel.NewModelInfo(oldRow) + if err != nil { + return nil, err + } + newInfo, err := r.dbModel.NewModelInfo(m) + if err != nil { + return nil, err + } + + addIndexes := r.newIndexes() + removeIndexes := r.newIndexes() + var errs []error + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + var err error + oldVal, err := valueFromIndex(oldInfo, indexSpec.columns) + if err != nil { + return nil, err + } + newVal, err := valueFromIndex(newInfo, indexSpec.columns) + if err != nil { + return nil, err + } + + // if old and new values are the same, don't worry + if oldVal == newVal { + continue + } + // old and new values are NOT the same + + uuidset := newUUIDSet(uuid) + + // check that there are no conflicts + vals := r.indexes[index] + existing := vals[newVal] + if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) { + errs = append(errs, NewIndexExistsError( + r.name, + newVal, + string(index), + uuid, + existing.list(), + )) + } + + addIndexes[index][newVal] = uuidset + removeIndexes[index][oldVal] = uuidset + } + if len(errs) > 0 { + return nil, fmt.Errorf("%+v", errs) + } + + // write indexes + for _, indexSpec := range r.indexSpecs { + index := 
indexSpec.index + for k, v := range addIndexes[index] { + if indexSpec.isSchemaIndex() { + r.indexes[index][k] = v + } else { + r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v) + } + } + for k, v := range removeIndexes[index] { + if indexSpec.isSchemaIndex() || substractUUIDSet(r.indexes[index][k], v).empty() { + delete(r.indexes[index], k) + } + } + } + + r.cache[uuid] = model.Clone(m) + return oldRow, nil +} + +// IndexExists checks if any of the schema indexes of the provided model is +// already in the cache under a different UUID. +func (r *RowCache) IndexExists(row model.Model) error { + info, err := r.dbModel.NewModelInfo(row) + if err != nil { + return err + } + field, err := info.FieldByColumn("_uuid") + if err != nil { + return nil + } + uuid := field.(string) + for _, indexSpec := range r.indexSpecs { + if !indexSpec.isSchemaIndex() { + // Given the ordered indexSpecs, we can break here if we reach the + // first non schema index + break + } + index := indexSpec.index + val, err := valueFromIndex(info, indexSpec.columns) + if err != nil { + continue + } + vals := r.indexes[index] + existing := vals[val] + if !existing.empty() && !existing.equals(newUUIDSet(uuid)) { + return NewIndexExistsError( + r.name, + val, + string(index), + uuid, + existing.list(), + ) + } + } + return nil +} + +// Delete deletes a row from the cache +func (r *RowCache) Delete(uuid string) error { + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.cache[uuid]; !ok { + return NewErrCacheInconsistent(fmt.Sprintf("cannot delete row %s as it does not exist in the cache", uuid)) + } + oldRow := r.cache[uuid] + oldInfo, err := r.dbModel.NewModelInfo(oldRow) + if err != nil { + return err + } + + removeIndexes := r.newIndexes() + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + oldVal, err := valueFromIndex(oldInfo, indexSpec.columns) + if err != nil { + return err + } + + removeIndexes[index][oldVal] = newUUIDSet(uuid) + } + + // write indexes + for _, 
indexSpec := range r.indexSpecs { + index := indexSpec.index + for k, v := range removeIndexes[index] { + // only remove the index if it is pointing to this uuid + // otherwise we can cause a consistency issue if we've processed + // updates out of order + if substractUUIDSet(r.indexes[index][k], v).empty() { + delete(r.indexes[index], k) + } + } + } + + delete(r.cache, uuid) + return nil +} + +// Rows returns a copy of all Rows in the Cache +func (r *RowCache) Rows() map[string]model.Model { + r.mutex.RLock() + defer r.mutex.RUnlock() + result := make(map[string]model.Model) + for k, v := range r.cache { + result[k] = model.Clone(v) + } + return result +} + +// RowsShallow returns a clone'd list of f all Rows in the cache, but does not +// clone the underlying objects. Therefore, the objects returned are READ ONLY. +// This is, however, thread safe, as the cached objects are cloned before being updated +// when modifications come in. +func (r *RowCache) RowsShallow() map[string]model.Model { + r.mutex.RLock() + defer r.mutex.RUnlock() + + result := make(map[string]model.Model, len(r.cache)) + for k, v := range r.cache { + result[k] = v + } + return result +} + +// uuidsByConditionsAsIndexes checks possible indexes that can be built with a +// subset of the provided conditions and returns the uuids for the models that +// match that subset of conditions. If no conditions could be used as indexes, +// returns nil. Note that this method does not necessarily match all the +// provided conditions. Thus the caller is required to evaluate all the +// conditions against the returned candidates. This is only useful to obtain, as +// quick as possible, via indexes, a reduced list of candidate models that might +// match all conditions, which should be better than just evaluating all +// conditions against all rows of a table. 
+// +//nolint:gocyclo // warns overall function is complex but ignores inner functions +func (r *RowCache) uuidsByConditionsAsIndexes(conditions []ovsdb.Condition, nativeValues []any) (uuidset, error) { + type indexableCondition struct { + column string + keys []any + nativeValue any + } + + // build an indexable condition, more appropriate for our processing, from + // an ovsdb condition. Only equality based conditions can be used as indexes + // (or `includes` conditions on map values). + toIndexableCondition := func(condition ovsdb.Condition, nativeValue any) *indexableCondition { + if condition.Column == "_uuid" { + return nil + } + if condition.Function != ovsdb.ConditionEqual && condition.Function != ovsdb.ConditionIncludes { + return nil + } + v := reflect.ValueOf(nativeValue) + if !v.IsValid() { + return nil + } + isSet := v.Kind() == reflect.Slice || v.Kind() == reflect.Array + if condition.Function == ovsdb.ConditionIncludes && isSet { + return nil + } + keys := []any{} + if v.Kind() == reflect.Map && condition.Function == ovsdb.ConditionIncludes { + for _, key := range v.MapKeys() { + keys = append(keys, key.Interface()) + } + } + return &indexableCondition{ + column: condition.Column, + keys: keys, + nativeValue: nativeValue, + } + } + + // for any given set of conditions, we need to check if an index uses the + // same fields as the conditions + indexMatchesConditions := func(spec indexSpec, conditions []*indexableCondition) bool { + columnKeys := []model.ColumnKey{} + for _, condition := range conditions { + if len(condition.keys) == 0 { + columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column}) + continue + } + for _, key := range condition.keys { + columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column, Key: key}) + } + } + index := newIndexFromColumnKeys(columnKeys...) 
+ return index == spec.index + } + + // for a specific set of conditions, check if an index can be built from + // them and return the associated UUIDs + evaluateConditionSetAsIndex := func(conditions []*indexableCondition) (uuidset, error) { + // build a model with the values from the conditions + m, err := r.dbModel.NewModel(r.name) + if err != nil { + return nil, err + } + info, err := r.dbModel.NewModelInfo(m) + if err != nil { + return nil, err + } + for _, conditions := range conditions { + err := info.SetField(conditions.column, conditions.nativeValue) + if err != nil { + return nil, err + } + } + for _, spec := range r.indexSpecs { + if !indexMatchesConditions(spec, conditions) { + continue + } + // if we have an index for those conditions, calculate the index + // value. The models mapped to that value match the conditions. + v, err := valueFromIndex(info, spec.columns) + if err != nil { + return nil, err + } + if v != nil { + uuids := r.indexes[spec.index][v] + if uuids == nil { + // this set of conditions was represented by an index but + // had no matches, return an empty set + uuids = uuidset{} + } + return uuids, nil + } + } + return nil, nil + } + + // set of uuids that match the conditions as we evaluate them + var matching uuidset + + // attempt to evaluate a set of conditions via indexes and intersect the + // results against matches of previous sets + intersectUUIDsFromConditionSet := func(indexableConditions []*indexableCondition) (bool, error) { + uuids, err := evaluateConditionSetAsIndex(indexableConditions) + if err != nil { + return true, err + } + if matching == nil { + matching = uuids + } else if uuids != nil { + matching = intersectUUIDSets(matching, uuids) + } + if matching != nil && len(matching) <= 1 { + // if we had no matches or a single match, no point in continuing + // searching for additional indexes. If we had a single match, it's + // cheaper to just evaluate all conditions on it. 
+ return true, nil + } + return false, nil + } + + // First, filter out conditions that cannot be matched against indexes. With + // the remaining conditions build all possible subsets (the power set of all + // conditions) and for any subset that is an index, intersect the obtained + // uuids with the ones obtained from previous subsets + matchUUIDsFromConditionsPowerSet := func() error { + ps := [][]*indexableCondition{} + // prime the power set with a first empty subset + ps = append(ps, []*indexableCondition{}) + for i, condition := range conditions { + nativeValue := nativeValues[i] + iCondition := toIndexableCondition(condition, nativeValue) + // this is not a condition we can use as an index, skip it + if iCondition == nil { + continue + } + // the power set is built appending the subsets that result from + // adding each item to each of the previous subsets + ss := make([][]*indexableCondition, len(ps)) + for j := range ss { + ss[j] = make([]*indexableCondition, len(ps[j]), len(ps[j])+1) + copy(ss[j], ps[j]) + ss[j] = append(ss[j], iCondition) + // as we add them to the power set, attempt to evaluate this + // subset of conditions as indexes + stop, err := intersectUUIDsFromConditionSet(ss[j]) + if stop || err != nil { + return err + } + } + ps = append(ps, ss...) 
+ } + return nil + } + + // finally + err := matchUUIDsFromConditionsPowerSet() + return matching, err +} + +// RowsByCondition searches models in the cache that match all conditions +func (r *RowCache) RowsByCondition(conditions []ovsdb.Condition) (map[string]model.Model, error) { + r.mutex.RLock() + defer r.mutex.RUnlock() + results := make(map[string]model.Model) + schema := r.dbModel.Schema.Table(r.name) + + // no conditions matches all rows + if len(conditions) == 0 { + for uuid := range r.cache { + results[uuid] = r.rowByUUID(uuid) + } + return results, nil + } + + // one pass to obtain the native values + nativeValues := make([]any, 0, len(conditions)) + for _, condition := range conditions { + tSchema := schema.Column(condition.Column) + nativeValue, err := ovsdb.OvsToNative(tSchema, condition.Value) + if err != nil { + return nil, err + } + nativeValues = append(nativeValues, nativeValue) + } + + // obtain all possible matches using conditions as indexes + matching, err := r.uuidsByConditionsAsIndexes(conditions, nativeValues) + if err != nil { + return nil, err + } + + // From the matches obtained with indexes, which might have not used all + // conditions, continue trimming down the list explicitly evaluating the + // conditions. 
+ for i, condition := range conditions { + matchingCondition := uuidset{} + + if condition.Column == "_uuid" && (condition.Function == ovsdb.ConditionEqual || condition.Function == ovsdb.ConditionIncludes) { + uuid, ok := nativeValues[i].(string) + if !ok { + panic(fmt.Sprintf("%+v is not a uuid", nativeValues[i])) + } + if _, found := r.cache[uuid]; found { + matchingCondition.add(uuid) + } + } else { + matchCondition := func(uuid string) error { + row := r.cache[uuid] + info, err := r.dbModel.NewModelInfo(row) + if err != nil { + return err + } + value, err := info.FieldByColumn(condition.Column) + if err != nil { + return err + } + ok, err := condition.Function.Evaluate(value, nativeValues[i]) + if err != nil { + return err + } + if ok { + matchingCondition.add(uuid) + } + return nil + } + if matching != nil { + // we just need to consider rows that matched previous + // conditions + for uuid := range matching { + err = matchCondition(uuid) + if err != nil { + return nil, err + } + } + } else { + // If this is the first condition we are able to check, just run + // it by whole table + for uuid := range r.cache { + err = matchCondition(uuid) + if err != nil { + return nil, err + } + } + } + } + if matching == nil { + matching = matchingCondition + } else { + matching = intersectUUIDSets(matching, matchingCondition) + } + if matching.empty() { + // no models match the conditions checked up to now, no need to + // check remaining conditions + break + } + } + + for uuid := range matching { + results[uuid] = r.rowByUUID(uuid) + } + + return results, nil +} + +// Len returns the length of the cache +func (r *RowCache) Len() int { + r.mutex.RLock() + defer r.mutex.RUnlock() + return len(r.cache) +} + +func (r *RowCache) Index(columns ...string) (map[any][]string, error) { + r.mutex.RLock() + defer r.mutex.RUnlock() + spec := newIndexFromColumns(columns...) 
+ index, ok := r.indexes[spec] + if !ok { + return nil, fmt.Errorf("%v is not an index", columns) + } + dbIndex := make(map[any][]string, len(index)) + for k, v := range index { + dbIndex[k] = v.list() + } + return dbIndex, nil +} + +// EventHandler can handle events when the contents of the cache changes +type EventHandler interface { + OnAdd(table string, model model.Model) + OnUpdate(table string, old model.Model, newModel model.Model) + OnDelete(table string, model model.Model) +} + +// EventHandlerFuncs is a wrapper for the EventHandler interface +// It allows a caller to only implement the functions they need +type EventHandlerFuncs struct { + AddFunc func(table string, model model.Model) + UpdateFunc func(table string, old model.Model, newModel model.Model) + DeleteFunc func(table string, model model.Model) +} + +// OnAdd calls AddFunc if it is not nil +func (e *EventHandlerFuncs) OnAdd(table string, model model.Model) { + if e.AddFunc != nil { + e.AddFunc(table, model) + } +} + +// OnUpdate calls UpdateFunc if it is not nil +func (e *EventHandlerFuncs) OnUpdate(table string, old, newModel model.Model) { + if e.UpdateFunc != nil { + e.UpdateFunc(table, old, newModel) + } +} + +// OnDelete calls DeleteFunc if it is not nil +func (e *EventHandlerFuncs) OnDelete(table string, row model.Model) { + if e.DeleteFunc != nil { + e.DeleteFunc(table, row) + } +} + +// TableCache contains a collection of RowCaches, hashed by name, +// and an array of EventHandlers that respond to cache updates +// It implements the ovsdb.NotificationHandler interface so it may +// handle update notifications +type TableCache struct { + cache map[string]*RowCache + eventProcessor *eventProcessor + dbModel model.DatabaseModel + ovsdb.NotificationHandler + mutex sync.RWMutex + logger *logr.Logger +} + +// Data is the type for data that can be prepopulated in the cache +type Data map[string]map[string]model.Model + +// NewTableCache creates a new TableCache +func NewTableCache(dbModel 
model.DatabaseModel, data Data, logger *logr.Logger) (*TableCache, error) { + if !dbModel.Valid() { + return nil, fmt.Errorf("tablecache without valid databasemodel cannot be populated") + } + if logger == nil { + l := logr.Discard() + logger = &l + } else { + l := logger.WithName("cache") + logger = &l + } + eventProcessor := newEventProcessor(bufferSize, logger) + cache := make(map[string]*RowCache) + tableTypes := dbModel.Types() + for name := range dbModel.Schema.Tables { + cache[name] = newRowCache(name, dbModel, tableTypes[name]) + } + for table, rowData := range data { + if _, ok := dbModel.Schema.Tables[table]; !ok { + return nil, fmt.Errorf("table %s is not in schema", table) + } + rowCache := cache[table] + for uuid, row := range rowData { + if err := rowCache.Create(uuid, row, true); err != nil { + return nil, err + } + } + } + return &TableCache{ + cache: cache, + eventProcessor: eventProcessor, + dbModel: dbModel, + mutex: sync.RWMutex{}, + logger: logger, + }, nil +} + +// Mapper returns the mapper +func (t *TableCache) Mapper() mapper.Mapper { + return t.dbModel.Mapper +} + +// DatabaseModel returns the DatabaseModelRequest +func (t *TableCache) DatabaseModel() model.DatabaseModel { + return t.dbModel +} + +// Table returns the a Table from the cache with a given name +func (t *TableCache) Table(name string) *RowCache { + t.mutex.RLock() + defer t.mutex.RUnlock() + if table, ok := t.cache[name]; ok { + return table + } + return nil +} + +// Tables returns a list of table names that are in the cache +func (t *TableCache) Tables() []string { + t.mutex.RLock() + defer t.mutex.RUnlock() + var result []string + for k := range t.cache { + result = append(result, k) + } + return result +} + +// Update implements the update method of the NotificationHandler interface +// this populates a channel with updates so they can be processed after the initial +// state has been Populated +func (t *TableCache) Update(_ any, tableUpdates ovsdb.TableUpdates) error { + 
if len(tableUpdates) == 0 { + return nil + } + if err := t.Populate(tableUpdates); err != nil { + t.logger.Error(err, "during libovsdb cache populate") + return err + } + return nil +} + +// Update2 implements the update method of the NotificationHandler interface +// this populates a channel with updates so they can be processed after the initial +// state has been Populated +func (t *TableCache) Update2(_ any, tableUpdates ovsdb.TableUpdates2) error { + if len(tableUpdates) == 0 { + return nil + } + if err := t.Populate2(tableUpdates); err != nil { + t.logger.Error(err, "during libovsdb cache populate2") + return err + } + return nil +} + +// Locked implements the locked method of the NotificationHandler interface +func (t *TableCache) Locked([]any) { +} + +// Stolen implements the stolen method of the NotificationHandler interface +func (t *TableCache) Stolen([]any) { +} + +// Echo implements the echo method of the NotificationHandler interface +func (t *TableCache) Echo([]any) { +} + +// Disconnected implements the disconnected method of the NotificationHandler interface +func (t *TableCache) Disconnected() { +} + +// Populate adds data to the cache and places an event on the channel +func (t *TableCache) Populate(tableUpdates ovsdb.TableUpdates) error { + t.mutex.Lock() + defer t.mutex.Unlock() + + for table := range t.dbModel.Types() { + tu, ok := tableUpdates[table] + if !ok { + continue + } + tCache := t.cache[table] + for uuid, row := range tu { + t.logger.V(5).Info("processing update", "table", table, "uuid", uuid) + update := updates.ModelUpdates{} + current := tCache.cache[uuid] + err := update.AddRowUpdate(t.dbModel, table, uuid, current, *row) + if err != nil { + return err + } + err = t.ApplyCacheUpdate(update) + if err != nil { + return err + } + } + } + return nil +} + +// Populate2 adds data to the cache and places an event on the channel +func (t *TableCache) Populate2(tableUpdates ovsdb.TableUpdates2) error { + t.mutex.Lock() + defer 
t.mutex.Unlock() + for table := range t.dbModel.Types() { + tu, ok := tableUpdates[table] + if !ok { + continue + } + tCache := t.cache[table] + for uuid, row := range tu { + t.logger.V(5).Info("processing update", "table", table, "uuid", uuid) + update := updates.ModelUpdates{} + current := tCache.cache[uuid] + if row.Initial == nil && row.Insert == nil && current == nil { + return NewErrCacheInconsistent(fmt.Sprintf("row with uuid %s does not exist", uuid)) + } + err := update.AddRowUpdate2(t.dbModel, table, uuid, current, *row) + if err != nil { + return err + } + err = t.ApplyCacheUpdate(update) + if err != nil { + return err + } + } + } + return nil +} + +// Purge drops all data in the cache and reinitializes it using the +// provided database model +func (t *TableCache) Purge(dbModel model.DatabaseModel) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.dbModel = dbModel + tableTypes := t.dbModel.Types() + for name := range t.dbModel.Schema.Tables { + t.cache[name] = newRowCache(name, t.dbModel, tableTypes[name]) + } +} + +// AddEventHandler registers the supplied EventHandler to receive cache events +func (t *TableCache) AddEventHandler(handler EventHandler) { + t.eventProcessor.AddEventHandler(handler) +} + +// Run starts the event processing and update processing loops. +// It blocks until the stop channel is closed. 
+// Once closed, it clears the updates/updates2 channels to ensure we don't process stale updates on a new connection +func (t *TableCache) Run(stopCh <-chan struct{}) { + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + t.eventProcessor.Run(stopCh) + }() + wg.Wait() +} + +// newRowCache creates a new row cache with the provided data +// if the data is nil, and empty RowCache will be created +func newRowCache(name string, dbModel model.DatabaseModel, dataType reflect.Type) *RowCache { + schemaIndexes := dbModel.Schema.Table(name).Indexes + clientIndexes := dbModel.Client().Indexes(name) + + r := &RowCache{ + name: name, + dbModel: dbModel, + indexSpecs: make([]indexSpec, 0, len(schemaIndexes)+len(clientIndexes)), + dataType: dataType, + cache: make(map[string]model.Model), + mutex: sync.RWMutex{}, + } + + // respect the order of indexes, add first schema indexes, then client + // indexes + indexes := map[index]indexSpec{} + for _, columns := range schemaIndexes { + columnKeys := newColumnKeysFromColumns(columns...) + index := newIndexFromColumnKeys(columnKeys...) + spec := indexSpec{index: index, columns: columnKeys, indexType: schemaIndexType} + r.indexSpecs = append(r.indexSpecs, spec) + indexes[index] = spec + } + for _, clientIndex := range clientIndexes { + columnKeys := clientIndex.Columns + index := newIndexFromColumnKeys(columnKeys...) 
+ // if this is already a DB index, ignore + if _, ok := indexes[index]; ok { + continue + } + spec := indexSpec{index: index, columns: columnKeys, indexType: clientIndexType} + r.indexSpecs = append(r.indexSpecs, spec) + indexes[index] = spec + } + + r.indexes = r.newIndexes() + return r +} + +func (r *RowCache) newIndexes() columnToValue { + c := make(columnToValue) + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + c[index] = make(valueToUUIDs) + } + return c +} + +// event encapsulates a cache event +type event struct { + eventType string + table string + old model.Model + new model.Model +} + +// eventProcessor handles the queueing and processing of cache events +type eventProcessor struct { + events chan *event + // handlersMutex locks the handlers array when we add a handler or dispatch events + // we don't need a RWMutex in this case as we only have one thread reading and the write + // volume is very low (i.e only when AddEventHandler is called) + handlersMutex sync.Mutex + handlers []EventHandler + logger *logr.Logger +} + +func newEventProcessor(capacity int, logger *logr.Logger) *eventProcessor { + return &eventProcessor{ + events: make(chan *event, capacity), + handlers: []EventHandler{}, + logger: logger, + } +} + +// AddEventHandler registers the supplied EventHandler with the eventProcessor +// EventHandlers MUST process events quickly, for example, pushing them to a queue +// to be processed by the client. 
Long Running handler functions adversely affect +// other handlers and MAY cause loss of data if the channel buffer is full +func (e *eventProcessor) AddEventHandler(handler EventHandler) { + e.handlersMutex.Lock() + defer e.handlersMutex.Unlock() + e.handlers = append(e.handlers, handler) +} + +// AddEvent writes an event to the channel +func (e *eventProcessor) AddEvent(eventType string, table string, old model.Model, newModel model.Model) { + // We don't need to check for error here since there + // is only a single writer. RPC is run in blocking mode + event := event{ + eventType: eventType, + table: table, + old: old, + new: newModel, + } + select { + case e.events <- &event: + // noop + return + default: + e.logger.V(0).Info("dropping event because event buffer is full") + } +} + +// Run runs the eventProcessor loop. +// It will block until the stopCh has been closed +// Otherwise it will wait for events to arrive on the event channel +// Once received, it will dispatch the event to each registered handler +func (e *eventProcessor) Run(stopCh <-chan struct{}) { + for { + select { + case <-stopCh: + return + case event := <-e.events: + e.handlersMutex.Lock() + for _, handler := range e.handlers { + switch event.eventType { + case addEvent: + handler.OnAdd(event.table, event.new) + case updateEvent: + handler.OnUpdate(event.table, event.old, event.new) + case deleteEvent: + handler.OnDelete(event.table, event.old) + } + } + e.handlersMutex.Unlock() + } + } +} + +type cacheUpdate interface { + GetUpdatedTables() []string + ForEachModelUpdate(table string, do func(uuid string, old, newModel model.Model) error) error +} + +func (t *TableCache) ApplyCacheUpdate(update cacheUpdate) error { + tables := update.GetUpdatedTables() + for _, table := range tables { + tCache := t.cache[table] + err := update.ForEachModelUpdate(table, func(uuid string, old, newModel model.Model) error { + switch { + case old == nil && newModel != nil: + t.logger.V(5).Info("inserting model", 
"table", table, "uuid", uuid, "model", newModel) + err := tCache.Create(uuid, newModel, false) + if err != nil { + return err + } + t.eventProcessor.AddEvent(addEvent, table, nil, newModel) + case old != nil && newModel != nil: + t.logger.V(5).Info("updating model", "table", table, "uuid", uuid, "old", old, "new", newModel) + _, err := tCache.Update(uuid, newModel, false) + if err != nil { + return err + } + t.eventProcessor.AddEvent(updateEvent, table, old, newModel) + case newModel == nil: + t.logger.V(5).Info("deleting model", "table", table, "uuid", uuid, "model", old) + err := tCache.Delete(uuid) + if err != nil { + return err + } + t.eventProcessor.AddEvent(deleteEvent, table, old, nil) + } + return nil + }) + if err != nil { + return err + } + } + return nil +} + +func valueFromIndex(info *mapper.Info, columnKeys []model.ColumnKey) (any, error) { + if len(columnKeys) > 1 { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + for _, columnKey := range columnKeys { + val, err := valueFromColumnKey(info, columnKey) + if err != nil { + return "", err + } + // if object is nil dont try to encode it + value := reflect.ValueOf(val) + if value.Kind() == reflect.Invalid { + continue + } + // if object is a nil pointer dont try to encode it + if value.Kind() == reflect.Pointer && value.IsNil() { + continue + } + err = enc.Encode(val) + if err != nil { + return "", err + } + } + h := sha256.New() + val := hex.EncodeToString(h.Sum(buf.Bytes())) + return val, nil + } + val, err := valueFromColumnKey(info, columnKeys[0]) + if err != nil { + return "", err + } + return val, err +} + +func valueFromColumnKey(info *mapper.Info, columnKey model.ColumnKey) (any, error) { + val, err := info.FieldByColumn(columnKey.Column) + if err != nil { + return nil, err + } + if columnKey.Key != nil { + val, err = valueFromMap(val, columnKey.Key) + if err != nil { + return "", fmt.Errorf("can't get key value from map: %v", err) + } + } + // if the value is a non-nil pointer of an 
optional, dereference + v := reflect.ValueOf(val) + if v.Kind() == reflect.Ptr && !v.IsNil() { + val = v.Elem().Interface() + } + return val, err +} + +func valueFromMap(aMap any, key any) (any, error) { + m := reflect.ValueOf(aMap) + if m.Kind() != reflect.Map { + return nil, fmt.Errorf("expected map but got %s", m.Kind()) + } + v := m.MapIndex(reflect.ValueOf(key)) + if !v.IsValid() { + // return the zero value for the map value type + return reflect.Indirect(reflect.New(m.Type().Elem())).Interface(), nil + } + + return v.Interface(), nil +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/cache/doc.go b/vendor/github.com/ovn-kubernetes/libovsdb/cache/doc.go new file mode 100644 index 0000000000..25f1597e2d --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/cache/doc.go @@ -0,0 +1,16 @@ +/* +Package cache provides a cache of model.Model elements that can be used in an OVSDB client or server. + +The cache can be accessed using a simple API: + + cache.Table("Open_vSwitch").Row("") + +It implements the ovsdb.NotificationHandler interface +such that it can be populated automatically by +update notifications + +It also contains an eventProcessor where callers +may registers functions that will get called on +every Add/Update/Delete event. 
+*/ +package cache diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/cache/uuidset.go b/vendor/github.com/ovn-kubernetes/libovsdb/cache/uuidset.go new file mode 100644 index 0000000000..f7c1397378 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/cache/uuidset.go @@ -0,0 +1,101 @@ +package cache + +type void struct{} +type uuidset map[string]void + +func newUUIDSet(uuids ...string) uuidset { + s := uuidset{} + for _, uuid := range uuids { + s[uuid] = void{} + } + return s +} + +func (s uuidset) add(uuid string) { + s[uuid] = void{} +} + +func (s uuidset) remove(uuid string) { + delete(s, uuid) +} + +func (s uuidset) has(uuid string) bool { + _, ok := s[uuid] + return ok +} + +func (s uuidset) equals(o uuidset) bool { + if len(s) != len(o) { + return false + } + for uuid := range s { + if !o.has(uuid) { + return false + } + } + return true +} + +func (s uuidset) getAny() string { + for k := range s { + return k + } + return "" +} + +func (s uuidset) list() []string { + uuids := make([]string, 0, len(s)) + for uuid := range s { + uuids = append(uuids, uuid) + } + return uuids +} + +func (s uuidset) empty() bool { + return len(s) == 0 +} + +func addUUIDSet(s1, s2 uuidset) uuidset { + if len(s2) == 0 { + return s1 + } + if s1 == nil { + s1 = uuidset{} + } + for uuid := range s2 { + s1.add(uuid) + } + return s1 +} + +func substractUUIDSet(s1, s2 uuidset) uuidset { + if len(s1) == 0 || len(s2) == 0 { + return s1 + } + for uuid := range s2 { + s1.remove(uuid) + } + return s1 +} + +func intersectUUIDSets(s1, s2 uuidset) uuidset { + if len(s1) == 0 || len(s2) == 0 { + return nil + } + var big uuidset + var small uuidset + if len(s1) > len(s2) { + big = s1 + small = s2 + } else { + big = s2 + small = s1 + } + f := uuidset{} + for uuid := range small { + if big.has(uuid) { + f.add(uuid) + } + } + return f +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/client/api.go b/vendor/github.com/ovn-kubernetes/libovsdb/client/api.go new file mode 100644 index 
0000000000..29be11f31f --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/client/api.go @@ -0,0 +1,752 @@ +package client + +import ( + "context" + "errors" + "fmt" + "reflect" + + "github.com/go-logr/logr" + "github.com/google/uuid" + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +// API defines basic operations to interact with the database +type API interface { + // List populates a slice of Models objects based on their type + // The function parameter must be a pointer to a slice of Models + // Models can be structs or pointers to structs + // If the slice is null, the entire cache will be copied into the slice + // If it has a capacity != 0, only 'capacity' elements will be filled in + List(ctx context.Context, result any) error + + // Create a Conditional API from a Function that is used to filter cached data + // The function must accept a Model implementation and return a boolean. E.g: + // ConditionFromFunc(func(l *LogicalSwitch) bool { return l.Enabled }) + WhereCache(predicate any) ConditionalAPI + + // Create a ConditionalAPI from a Model's index data, where operations + // apply to elements that match the values provided in one or more + // model.Models according to the indexes. All provided Models must be + // the same type or an error will be generated when operations are + // are performed on the ConditionalAPI. + Where(...model.Model) ConditionalAPI + + // Select selects all rows from a table, with optional column filtering. + // The model is used to determine the table, but not for filtering. + Select(model.Model, ...any) ([]ovsdb.Operation, error) + + // WhereAny creates a ConditionalAPI from a list of Conditions where + // operations apply to elements that match any (eg, logical OR) of the + // conditions. 
+ WhereAny(model.Model, ...model.Condition) ConditionalAPI + + // WhereAll creates a ConditionalAPI from a list of Conditions where + // operations apply to elements that match all (eg, logical AND) of the + // conditions. + WhereAll(model.Model, ...model.Condition) ConditionalAPI + + // Get retrieves a model from the cache + // The way the object will be fetch depends on the data contained in the + // provided model and the indexes defined in the associated schema + // For more complex ways of searching for elements in the cache, the + // preferred way is Where({condition}).List() + Get(context.Context, model.Model) error + + // Create returns the operation needed to add the model(s) to the Database + // Only fields with non-default values will be added to the transaction. If + // the field associated with column "_uuid" has some content other than a + // UUID, it will be treated as named-uuid + Create(...model.Model) ([]ovsdb.Operation, error) +} + +// ConditionalAPI is an interface used to perform operations that require / use Conditions +type ConditionalAPI interface { + // List uses the condition to search on the cache and populates + // the slice of Models objects based on their type + List(ctx context.Context, result any) error + + // Mutate returns the operations needed to perform the mutation specified + // By the model and the list of Mutation objects + // Depending on the Condition, it might return one or many operations + Mutate(model.Model, ...model.Mutation) ([]ovsdb.Operation, error) + + // Update returns the operations needed to update any number of rows according + // to the data in the given model. + // By default, all the non-default values contained in model will be updated. 
+ // Optional fields can be passed (pointer to fields in the model) to select the + // the fields to be updated + Update(model.Model, ...any) ([]ovsdb.Operation, error) + + // Delete returns the Operations needed to delete the models selected via the condition + Delete() ([]ovsdb.Operation, error) + + // Wait returns the operations needed to perform the wait specified + // by the until condition, timeout, row and columns based on provided parameters. + Wait(ovsdb.WaitCondition, *int, model.Model, ...any) ([]ovsdb.Operation, error) + + // Select returns the operations to search on the database. + // Depending on the Condition, it might return one or many operations. + // Use GetSelectResults on the results of the transaction to gather the found Models + // Optional fields can be passed (pointer to fields in the model) to select specific + // columns to be returned. If no fields are provided, all columns will be selected. + Select(m model.Model, fields ...any) ([]ovsdb.Operation, error) +} + +// ErrWrongType is used to report the user provided parameter has the wrong type +type ErrWrongType struct { + inputType reflect.Type + reason string +} + +func (e *ErrWrongType) Error() string { + return fmt.Sprintf("Wrong parameter type (%s): %s", e.inputType, e.reason) +} + +// ErrNotFound is used to inform the object or table was not found in the cache +var ErrNotFound = errors.New("object not found") + +// api struct implements both API and ConditionalAPI +// Where() can be used to create a ConditionalAPI api +type api struct { + cache *cache.TableCache + cond Conditional + logger *logr.Logger + validateModel bool + // withReadLock optionally acquires a read lock (and any preconditions such as + // cache-consistency checks) and returns an unlock function. 
+ withReadLock func(context.Context) func() +} + +// List populates a slice of Models given as parameter based on the configured Condition +func (a api) List(ctx context.Context, result any) error { + unlock := a.lockForRead(ctx) + if unlock != nil { + defer unlock() + } + + resultPtr := reflect.ValueOf(result) + if resultPtr.Type().Kind() != reflect.Ptr { + return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"} + } + + resultVal := reflect.Indirect(resultPtr) + if resultVal.Type().Kind() != reflect.Slice { + return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"} + } + + // List accepts a slice of Models that can be either structs or pointer to + // structs + var appendValue func(reflect.Value) + var m model.Model + if resultVal.Type().Elem().Kind() == reflect.Ptr { + m = reflect.New(resultVal.Type().Elem().Elem()).Interface() + appendValue = func(v reflect.Value) { + resultVal.Set(reflect.Append(resultVal, v)) + } + } else { + m = reflect.New(resultVal.Type().Elem()).Interface() + appendValue = func(v reflect.Value) { + resultVal.Set(reflect.Append(resultVal, reflect.Indirect(v))) + } + } + + table, err := a.getTableFromModel(m) + if err != nil { + return err + } + + if a.cond != nil && a.cond.Table() != table { + return &ErrWrongType{resultPtr.Type(), + fmt.Sprintf("Table derived from input type (%s) does not match Table from Condition (%s)", table, a.cond.Table())} + } + + tableCache := a.cache.Table(table) + if tableCache == nil { + return ErrNotFound + } + + var rows map[string]model.Model + if a.cond != nil { + rows, err = a.cond.Matches() + if err != nil { + return err + } + } else { + rows = tableCache.Rows() + } + // If given a null slice, fill it in the cache table completely, if not, just up to + // its capability. 
+ if resultVal.IsNil() || resultVal.Cap() == 0 { + resultVal.Set(reflect.MakeSlice(resultVal.Type(), 0, len(rows))) + } + i := resultVal.Len() + maxCap := resultVal.Cap() + + for _, row := range rows { + if i >= maxCap { + break + } + appendValue(reflect.ValueOf(row)) + i++ + } + + return nil +} + +// Where returns a conditionalAPI based on model indexes. All provided models +// must be the same type. +func (a api) Where(models ...model.Model) ConditionalAPI { + return newConditionalAPI(a.cache, a.conditionFromModels(models), a.logger, a.validateModel, a.withReadLock) +} + +// WhereAny returns a conditionalAPI based on a Condition list that matches any +// of the conditions individually +func (a api) WhereAny(m model.Model, cond ...model.Condition) ConditionalAPI { + return newConditionalAPI(a.cache, a.conditionFromExplicitConditions(false, m, cond...), a.logger, a.validateModel, a.withReadLock) +} + +// WhereAll returns a conditionalAPI based on a Condition list that matches all +// of the conditions together +func (a api) WhereAll(m model.Model, cond ...model.Condition) ConditionalAPI { + return newConditionalAPI(a.cache, a.conditionFromExplicitConditions(true, m, cond...), a.logger, a.validateModel, a.withReadLock) +} + +// WhereCache returns a conditionalAPI based a Predicate +func (a api) WhereCache(predicate any) ConditionalAPI { + return newConditionalAPI(a.cache, a.conditionFromFunc(predicate), a.logger, a.validateModel, a.withReadLock) +} + +// Conditional interface implementation +// FromFunc returns a Condition from a function +func (a api) conditionFromFunc(predicate any) Conditional { + table, err := a.getTableFromFunc(predicate) + if err != nil { + return newErrorConditional(err) + } + + condition, err := newPredicateConditional(table, a.cache, predicate) + if err != nil { + return newErrorConditional(err) + } + return condition +} + +// conditionFromModels returns a Conditional from one or more models. 
+func (a api) conditionFromModels(models []model.Model) Conditional { + if len(models) == 0 { + return newErrorConditional(fmt.Errorf("at least one model required")) + } + + tableName, err := a.getTableFromModel(models[0]) + if err != nil { + return newErrorConditional(err) + } + + conditional, err := newEqualityConditional(tableName, a.cache, models) + if err != nil { + return newErrorConditional(err) + } + return conditional +} + +// conditionFromExplicitConditions returns a Conditional from a model and a set +// of explicit conditions. If matchAll is true, then models that match all the given +// conditions are selected by the Conditional. If matchAll is false, then any model +// that matches one of the conditions is selected. +func (a api) conditionFromExplicitConditions(matchAll bool, m model.Model, cond ...model.Condition) Conditional { + if len(cond) == 0 { + return newErrorConditional(fmt.Errorf("at least one condition is required")) + } + tableName, err := a.getTableFromModel(m) + if tableName == "" { + return newErrorConditional(err) + } + conditional, err := newExplicitConditional(tableName, a.cache, matchAll, m, cond...) + if err != nil { + return newErrorConditional(err) + } + return conditional +} + +// Get is a generic Get function capable of returning (through a provided pointer) +// a instance of any row in the cache. 
+// 'result' must be a pointer to an Model that exists in the ClientDBModel +// +// The way the cache is searched depends on the fields already populated in 'result' +// Any table index (including _uuid) will be used for comparison +func (a api) Get(ctx context.Context, m model.Model) error { + unlock := a.lockForRead(ctx) + if unlock != nil { + defer unlock() + } + + table, err := a.getTableFromModel(m) + if err != nil { + return err + } + + tableCache := a.cache.Table(table) + if tableCache == nil { + return ErrNotFound + } + + _, found, err := tableCache.RowByModel(m) + if err != nil { + return err + } else if found == nil { + return ErrNotFound + } + + model.CloneInto(found, m) + + return nil +} + +// lockForRead runs the optional read-lock hook and returns an unlock function. +// If no hook is configured, it returns nil. +func (a api) lockForRead(ctx context.Context) func() { + if a.withReadLock == nil { + return nil + } + return a.withReadLock(ctx) +} + +// Create is a generic function capable of creating any row in the DB +// A valid Model (pointer to object) must be provided. 
+func (a api) Create(models ...model.Model) ([]ovsdb.Operation, error) { + if len(models) == 0 { + return nil, nil + } + + var operations []ovsdb.Operation + var tableName string + var err error + + for _, m := range models { + var realUUID, namedUUID string + var currentTable string + + currentTable, err = a.getTableFromModel(m) + if err != nil { + return nil, err + } + if a.validateModel { + if err := validateModel(m); err != nil { + return nil, err + } + } + + if tableName == "" { + tableName = currentTable + } else if currentTable != tableName { + return nil, fmt.Errorf("models must belong to the same table for a single Create operation (%s != %s)", currentTable, tableName) + } + + // Use the DatabaseModel associated with the cache to get info + info, err := a.cache.DatabaseModel().NewModelInfo(m) + if err != nil { + return nil, err + } + + if uuid, err := info.FieldByColumn("_uuid"); err == nil { + tmpUUID := uuid.(string) + if ovsdb.IsNamedUUID(tmpUUID) { + namedUUID = tmpUUID + } else if ovsdb.IsValidUUID(tmpUUID) { + realUUID = tmpUUID + + } + } else { + return nil, fmt.Errorf("error accessing _uuid field: %w", err) + } + + // Use the Mapper associated with the cache to create the row + row, err := a.cache.Mapper().NewRow(info) + if err != nil { + return nil, err + } + + // UUID is given in the operation, not the object + delete(row, "_uuid") + + op := ovsdb.Operation{ + Op: ovsdb.OperationInsert, + Table: tableName, + Row: row, + UUID: realUUID, + UUIDName: namedUUID, + } + operations = append(operations, op) + } + return operations, nil +} + +// Mutate returns the operations needed to transform the one Model into another one +func (a api) Mutate(model model.Model, mutationObjs ...model.Mutation) ([]ovsdb.Operation, error) { + if len(mutationObjs) < 1 { + return nil, fmt.Errorf("at least one Mutation must be provided") + } + + tableName, err := a.getTableFromModel(model) + if err != nil { + return nil, err + } + tableSchema := 
a.cache.DatabaseModel().Schema.Table(tableName) + if tableSchema == nil { + return nil, fmt.Errorf("schema not found for table %s", tableName) + } + info, err := a.cache.DatabaseModel().NewModelInfo(model) + if err != nil { + return nil, err + } + + // Validate mutations if validation is enabled + if a.validateModel { + err = validateMutations(model, info, mutationObjs...) + if err != nil { + return nil, err + } + } + + // Convert model.Mutation to ovsdb.Mutation and store them + var mutations []ovsdb.Mutation + for _, mutationObj := range mutationObjs { + columnName, err := info.ColumnByPtr(mutationObj.Field) + if err != nil { + return nil, fmt.Errorf("could not get column for mutation field: %w", err) + } + mutation, err := a.cache.Mapper().NewMutation(info, columnName, mutationObj.Mutator, mutationObj.Value) + if err != nil { + return nil, fmt.Errorf("failed to create OVSDB mutation for column '%s': %w", columnName, err) + } + mutations = append(mutations, *mutation) + } + + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + var operations []ovsdb.Operation + for _, condition := range conditions { + operations = append(operations, + ovsdb.Operation{ + Op: ovsdb.OperationMutate, + Table: tableName, + Where: condition, + Mutations: mutations, + }, + ) + } + + return operations, nil +} + +// Update is a generic function capable of updating any mutable field in any row in the database +// Additional fields can be passed (variadic opts) to indicate fields to be updated +// All immutable fields will be ignored +func (a api) Update(model model.Model, fields ...any) ([]ovsdb.Operation, error) { + tableName, err := a.getTableFromModel(model) + if err != nil { + return nil, err + } + + if a.validateModel { + if err := validateModel(model); err != nil { + return nil, err + } + } + + tableSchema := a.cache.DatabaseModel().Schema.Table(tableName) + info, err := a.cache.DatabaseModel().NewModelInfo(model) + if err != nil { + return nil, err + } + 
+ if len(fields) > 0 { + for _, f := range fields { + colName, err := info.ColumnByPtr(f) + if err != nil { + return nil, err + } + if !tableSchema.Columns[colName].Mutable() { + return nil, fmt.Errorf("unable to update field %s of table %s as it is not mutable", colName, tableName) + } + } + } + + // Convert the model to a row, considering only specified fields if provided + row, err := a.cache.Mapper().NewRow(info, fields...) + if err != nil { + return nil, err + } + + // Remove immutable fields from the row + for colName, column := range tableSchema.Columns { + if !column.Mutable() { + // Only delete if the key actually exists in the row map + if _, exists := row[colName]; exists { + a.logger.V(2).Info("removing immutable field from update row", "name", colName) + delete(row, colName) + } + } + } + // Also remove _uuid explicitly if it exists + delete(row, "_uuid") + + // Check if the row is empty after removing immutable fields + if len(row) == 0 { + return nil, fmt.Errorf("attempted to update using an empty row. 
please check that all fields you wish to update are mutable") + } + + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + var operations []ovsdb.Operation + for _, condition := range conditions { + operations = append(operations, + ovsdb.Operation{ + Op: ovsdb.OperationUpdate, + Table: tableName, + Row: row, + Where: condition, + }, + ) + } + return operations, nil +} + +// Delete returns the Operation needed to delete the selected models from the database +func (a api) Delete() ([]ovsdb.Operation, error) { + var operations []ovsdb.Operation + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + for _, condition := range conditions { + operations = append(operations, + ovsdb.Operation{ + Op: ovsdb.OperationDelete, + Table: a.cond.Table(), + Where: condition, + }, + ) + } + + return operations, nil +} + +func (a api) Wait(untilConFun ovsdb.WaitCondition, timeout *int, model model.Model, fields ...any) ([]ovsdb.Operation, error) { + var operations []ovsdb.Operation + + /* + Ref: https://datatracker.ietf.org/doc/html/rfc7047.txt#section-5.2.6 + + lb := &nbdb.LoadBalancer{} + condition := model.Condition{ + Field: &lb.Name, + Function: ovsdb.ConditionEqual, + Value: "lbName", + } + timeout0 := 0 + client.Where(lb, condition).Wait( + ovsdb.WaitConditionNotEqual, // Until + &timeout0, // Timeout + &lb, // Row (and Table) + &lb.Name, // Cols (aka fields) + ) + */ + + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + table, err := a.getTableFromModel(model) + if err != nil { + return nil, err + } + + info, err := a.cache.DatabaseModel().NewModelInfo(model) + if err != nil { + return nil, err + } + + var columnNames []string + if len(fields) > 0 { + columnNames = make([]string, 0, len(fields)) + for _, f := range fields { + colName, err := info.ColumnByPtr(f) + if err != nil { + return nil, err + } + columnNames = append(columnNames, colName) + } + } + + row, err := 
a.cache.Mapper().NewRow(info, fields...) + if err != nil { + return nil, err + } + rows := []ovsdb.Row{row} + + for _, condition := range conditions { + operation := ovsdb.Operation{ + Op: ovsdb.OperationWait, + Table: table, + Where: condition, + Until: string(untilConFun), + Columns: columnNames, + Rows: rows, + } + + if timeout != nil { + operation.Timeout = timeout + } + + operations = append(operations, operation) + } + + return operations, nil +} + +// getTableFromModel returns the table name from a Model object after performing +// type verifications on the model +func (a api) getTableFromModel(m any) (string, error) { + if _, ok := m.(model.Model); !ok { + return "", &ErrWrongType{reflect.TypeOf(m), "Type does not implement Model interface"} + } + table := a.cache.DatabaseModel().FindTable(reflect.TypeOf(m)) + if table == "" { + return "", &ErrWrongType{reflect.TypeOf(m), "Model not found in Database Model"} + } + return table, nil +} + +// getTableFromModel returns the table name from a the predicate after performing +// type verifications +func (a api) getTableFromFunc(predicate any) (string, error) { + predType := reflect.TypeOf(predicate) + if predType == nil || predType.Kind() != reflect.Func { + return "", &ErrWrongType{predType, "Expected function"} + } + if predType.NumIn() != 1 || predType.NumOut() != 1 || predType.Out(0).Kind() != reflect.Bool { + return "", &ErrWrongType{predType, "Expected func(Model) bool"} + } + + modelInterface := reflect.TypeOf((*model.Model)(nil)).Elem() + modelType := predType.In(0) + if !modelType.Implements(modelInterface) { + return "", &ErrWrongType{predType, + fmt.Sprintf("Type %s does not implement Model interface", modelType.String())} + } + + table := a.cache.DatabaseModel().FindTable(modelType) + if table == "" { + return "", &ErrWrongType{predType, + fmt.Sprintf("Model %s not found in Database Model", modelType.String())} + } + return table, nil +} + +// newAPI returns a new API to interact with the database. 
+// If withReadLock is provided, the first hook is used by read-path methods +// (currently Get and List) to guard cache reads and return a matching unlock func. +func newAPI(cache *cache.TableCache, logger *logr.Logger, validateModel bool, withReadLock ...func(context.Context) func()) API { + var readLockFn func(context.Context) func() + if len(withReadLock) > 0 { + readLockFn = withReadLock[0] + } + + return api{ + cache: cache, + logger: logger, + validateModel: validateModel, + withReadLock: readLockFn, + } +} + +// newConditionalAPI returns a new ConditionalAPI to interact with the database. +// If withReadLock is provided, the first hook is propagated to conditional +// read-path methods (currently List) to guard cache reads. +func newConditionalAPI(cache *cache.TableCache, cond Conditional, logger *logr.Logger, validateModel bool, withReadLock ...func(context.Context) func()) ConditionalAPI { + var readLockFn func(context.Context) func() + if len(withReadLock) > 0 { + readLockFn = withReadLock[0] + } + + return api{ + cache: cache, + cond: cond, + logger: logger, + validateModel: validateModel, + withReadLock: readLockFn, + } +} + +// Select returns the operations to search on the database. +// Depending on the Condition, it might return one or many operations. +// If non-conditional it means select all and m should be a zero value. 
+// Use GetSelectResults on the results of the transaction to gather the found Models +func (a api) Select(m model.Model, fields ...any) ([]ovsdb.Operation, error) { + tableName, err := a.getTableFromModel(m) + if err != nil { + return nil, err + } + var ovsdbConditionsList [][]ovsdb.Condition + if a.cond != nil { + ovsdbConditionsList, err = a.cond.Generate() + if err != nil { + return nil, err + } + } else { + ovsdbConditionsList = [][]ovsdb.Condition{{}} + } + + // Determine columns to select + if a.cache == nil || !a.cache.DatabaseModel().Valid() { + return nil, fmt.Errorf("database model/schema info not available for select") + } + + var columnsToSelect []string + if len(fields) > 0 { + columnsToSelect, err = a.getColumns(m, fields...) + if err != nil { + return nil, err + } + } + + correlationID := uuid.NewString() + operations := make([]ovsdb.Operation, 0, len(ovsdbConditionsList)) + for _, whereClause := range ovsdbConditionsList { + selectOp := ovsdb.Operation{ + Op: ovsdb.OperationSelect, + Table: tableName, + Where: whereClause, + Columns: columnsToSelect, + } + ovsdb.SetCorrelationID(&selectOp, correlationID) + operations = append(operations, selectOp) + } + + return operations, nil +} + +// getColumns is a helper function that determines which columns to select +// based on a model and a list of field pointers. +func (a api) getColumns(m model.Model, fields ...any) ([]string, error) { + if len(fields) == 0 { + return nil, nil + } + info, err := a.cache.DatabaseModel().NewModelInfo(m) + if err != nil { + return nil, fmt.Errorf("failed to create model info for select: %w", err) + } + return info.ColumnsByPtrWithUUID(fields...) 
+} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/client/api_test_model.go b/vendor/github.com/ovn-kubernetes/libovsdb/client/api_test_model.go new file mode 100644 index 0000000000..9f692e4e45 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/client/api_test_model.go @@ -0,0 +1,289 @@ +package client + +import ( + "encoding/json" + "testing" + + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + "github.com/stretchr/testify/require" +) + +var apiTestSchema = []byte(`{ + "name": "OVN_Northbound", + "version": "5.31.0", + "cksum": "2352750632 28701", + "tables": { + "Logical_Switch": { + "columns": { + "name": {"type": "string"}, + "ports": {"type": {"key": {"type": "uuid", + "refTable": "Logical_Switch_Port", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "acls": {"type": {"key": {"type": "uuid", + "refTable": "ACL", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "qos_rules": {"type": {"key": {"type": "uuid", + "refTable": "QoS", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "load_balancer": {"type": {"key": {"type": "uuid", + "refTable": "Load_Balancer", + "refType": "weak"}, + "min": 0, + "max": "unlimited"}}, + "dns_records": {"type": {"key": {"type": "uuid", + "refTable": "DNS", + "refType": "weak"}, + "min": 0, + "max": "unlimited"}}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "forwarding_groups": { + "type": {"key": {"type": "uuid", + "refTable": "Forwarding_Group", + "refType": "strong"}, + "min": 0, "max": "unlimited"}}}, + "isRoot": true}, + "Logical_Switch_Port": { + "columns": { + "name": {"type": "string"}, + "type": {"type": "string"}, + "options": { + "type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}}, + 
"parent_name": {"type": {"key": "string", "min": 0, "max": 1}}, + "tag_request": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4095}, + "min": 0, "max": 1}}, + "tag": { + "type": {"key": {"type": "integer", + "minInteger": 1, + "maxInteger": 4095}, + "min": 0, "max": 1}}, + "addresses": {"type": {"key": "string", + "min": 0, + "max": "unlimited"}}, + "dynamic_addresses": {"type": {"key": "string", + "min": 0, + "max": 1}}, + "port_security": {"type": {"key": "string", + "min": 0, + "max": "unlimited"}}, + "up": {"type": {"key": "boolean", "min": 0, "max": 1}}, + "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, + "dhcpv4_options": {"type": {"key": {"type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak"}, + "min": 0, + "max": 1}}, + "dhcpv6_options": {"type": {"key": {"type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak"}, + "min": 0, + "max": 1}}, + "ha_chassis_group": { + "type": {"key": {"type": "uuid", + "refTable": "HA_Chassis_Group", + "refType": "strong"}, + "min": 0, + "max": 1}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "indexes": [["name"]], + "isRoot": false}, + "Bridge": { + "columns": { + "name": { + "type": "string", + "mutable": false}, + "datapath_type": { + "type": "string"}, + "datapath_version": { + "type": "string"}, + "datapath_id": { + "type": {"key": "string", "min": 0, "max": 1}, + "ephemeral": true}, + "stp_enable": { + "type": "boolean"}, + "rstp_enable": { + "type": "boolean"}, + "mcast_snooping_enable": { + "type": "boolean"}, + "ports": { + "type": {"key": {"type": "uuid", + "refTable": "Port"}, + "min": 0, "max": "unlimited"}}, + "mirrors": { + "type": {"key": {"type": "uuid", + "refTable": "Mirror"}, + "min": 0, "max": "unlimited"}}, + "netflow": { + "type": {"key": {"type": "uuid", + "refTable": "NetFlow"}, + "min": 0, "max": 1}}, + "sflow": { + "type": {"key": {"type": "uuid", + "refTable": "sFlow"}, + "min": 
0, "max": 1}}, + "ipfix": { + "type": {"key": {"type": "uuid", + "refTable": "IPFIX"}, + "min": 0, "max": 1}}, + "controller": { + "type": {"key": {"type": "uuid", + "refTable": "Controller"}, + "min": 0, "max": "unlimited"}}, + "protocols": { + "type": {"key": {"type": "string", + "enum": ["set", ["OpenFlow10", + "OpenFlow11", + "OpenFlow12", + "OpenFlow13", + "OpenFlow14", + "OpenFlow15"]]}, + "min": 0, "max": "unlimited"}}, + "fail_mode": { + "type": {"key": {"type": "string", + "enum": ["set", ["standalone", "secure"]]}, + "min": 0, "max": 1}}, + "status": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "rstp_status": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}, + "ephemeral": true}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "flood_vlans": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4095}, + "min": 0, "max": 4096}}, + "flow_tables": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 254}, + "value": {"type": "uuid", + "refTable": "Flow_Table"}, + "min": 0, "max": "unlimited"}}, + "auto_attach": { + "type": {"key": {"type": "uuid", + "refTable": "AutoAttach"}, + "min": 0, "max": 1}}}, + "indexes": [["name"]]} + } + }`) + +type testLogicalSwitch struct { + UUID string `ovsdb:"_uuid"` + ACLs []string `ovsdb:"acls"` + DNSRecords []string `ovsdb:"dns_records"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ForwardingGroups []string `ovsdb:"forwarding_groups"` + LoadBalancer []string `ovsdb:"load_balancer"` + Name string `ovsdb:"name"` + OtherConfig map[string]string `ovsdb:"other_config"` + Ports []string `ovsdb:"ports"` + QOSRules []string `ovsdb:"qos_rules"` +} + +// Table returns the table name. 
It's part of the Model interface +func (*testLogicalSwitch) Table() string { + return "Logical_Switch" +} + +// LogicalSwitchPort struct defines an object in Logical_Switch_Port table +type testLogicalSwitchPort struct { + UUID string `ovsdb:"_uuid"` + Addresses []string `ovsdb:"addresses"` + Dhcpv4Options *string `ovsdb:"dhcpv4_options"` + Dhcpv6Options *string `ovsdb:"dhcpv6_options"` + DynamicAddresses *string `ovsdb:"dynamic_addresses"` + Enabled *bool `ovsdb:"enabled"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + HaChassisGroup *string `ovsdb:"ha_chassis_group"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + ParentName *string `ovsdb:"parent_name"` + PortSecurity []string `ovsdb:"port_security"` + Tag *int `ovsdb:"tag" validate:"omitempty,min=1,max=4095"` + TagRequest *int `ovsdb:"tag_request" validate:"omitempty,min=0,max=4095"` + Type string `ovsdb:"type"` + Up *bool `ovsdb:"up"` +} + +// Table returns the table name. It's part of the Model interface +func (*testLogicalSwitchPort) Table() string { + return "Logical_Switch_Port" +} + +// Bridge defines an object in Bridge table +type testBridge struct { + UUID string `ovsdb:"_uuid"` + AutoAttach *string `ovsdb:"auto_attach"` + Controller []string `ovsdb:"controller"` + DatapathID *string `ovsdb:"datapath_id"` + DatapathType string `ovsdb:"datapath_type"` + DatapathVersion string `ovsdb:"datapath_version"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + FailMode *string `ovsdb:"fail_mode" validate:"omitempty,oneof='standalone' 'secure'"` + FloodVLANs []int `ovsdb:"flood_vlans" validate:"max=4096,dive,min=0,max=4095"` + FlowTables map[int]string `ovsdb:"flow_tables" validate:"dive,keys,min=0,max=254"` + IPFIX *string `ovsdb:"ipfix"` + McastSnoopingEnable bool `ovsdb:"mcast_snooping_enable"` + Mirrors []string `ovsdb:"mirrors"` + Name string `ovsdb:"name"` + Netflow *string `ovsdb:"netflow"` + OtherConfig map[string]string `ovsdb:"other_config"` + Ports []string 
`ovsdb:"ports"` + Protocols []string `ovsdb:"protocols" validate:"dive,oneof='OpenFlow10' 'OpenFlow11' 'OpenFlow12' 'OpenFlow13' 'OpenFlow14' 'OpenFlow15'"` + RSTPEnable bool `ovsdb:"rstp_enable"` + RSTPStatus map[string]string `ovsdb:"rstp_status"` + Sflow *string `ovsdb:"sflow"` + Status map[string]string `ovsdb:"status"` + STPEnable bool `ovsdb:"stp_enable"` +} + +// Table returns the table name. It's part of the Model interface +func (*testBridge) Table() string { + return "Bridge" +} + +func apiTestCache(t testing.TB, data map[string]map[string]model.Model) *cache.TableCache { + var schema ovsdb.DatabaseSchema + err := json.Unmarshal(apiTestSchema, &schema) + require.NoError(t, err) + db, err := model.NewClientDBModel("OVN_Northbound", map[string]model.Model{ + "Logical_Switch": &testLogicalSwitch{}, + "Logical_Switch_Port": &testLogicalSwitchPort{}, + "Bridge": &testBridge{}, + }) + require.NoError(t, err) + dbModel, errs := model.NewDatabaseModel(schema, db) + require.Empty(t, errs) + cache, err := cache.NewTableCache(dbModel, data, nil) + require.NoError(t, err) + return cache +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/client/client.go b/vendor/github.com/ovn-kubernetes/libovsdb/client/client.go new file mode 100644 index 0000000000..4a8d9478e6 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/client/client.go @@ -0,0 +1,1578 @@ +package client + +import ( + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "net" + "net/url" + "reflect" + "strings" + "sync" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/rpc2" + "github.com/cenkalti/rpc2/jsonrpc" + "github.com/go-logr/logr" + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb" +) + +// Constants defined for libovsdb +const ( + SSL = "ssl" + TCP = "tcp" + UNIX = "unix" 
+) + +const serverDB = "_Server" + +// ErrNotConnected is an error returned when the client is not connected +var ErrNotConnected = errors.New("not connected") + +// ErrAlreadyConnected is an error returned when the client is already connected +var ErrAlreadyConnected = errors.New("already connected") + +// ErrUnsupportedRPC is an error returned when an unsupported RPC method is called +var ErrUnsupportedRPC = errors.New("unsupported rpc") + +// Client represents an OVSDB Client Connection +// It provides all the necessary functionality to Connect to a server, +// perform transactions, and build your own replica of the database with +// Monitor or MonitorAll. It also provides a Cache that is populated from OVSDB +// update notifications. +type Client interface { + Connect(context.Context) error + Disconnect() + Close() + Schema() ovsdb.DatabaseSchema + Cache() *cache.TableCache + UpdateEndpoints([]string) + SetOption(Option) error + Connected() bool + DisconnectNotify() chan struct{} + Echo(context.Context) error + Transact(context.Context, ...ovsdb.Operation) ([]ovsdb.OperationResult, error) + Monitor(context.Context, *Monitor) (MonitorCookie, error) + MonitorAll(context.Context) (MonitorCookie, error) + MonitorCancel(ctx context.Context, cookie MonitorCookie) error + NewMonitor(...MonitorOption) *Monitor + CurrentEndpoint() string + API + // GetSelectResultsByIndex parses the result of the select operation indicated by + // the 0-based index from the transaction results of the provided operations. + GetSelectResultsByIndex(ops []ovsdb.Operation, results []ovsdb.OperationResult, target interface{}, index int) error + + // GetSelectResults parses the result of the first select operation from the + // transaction results of the provided operations. 
+ GetSelectResults(ops []ovsdb.Operation, results []ovsdb.OperationResult, target interface{}) error +} + +type bufferedUpdate struct { + updates *ovsdb.TableUpdates + updates2 *ovsdb.TableUpdates2 + lastTxnID string +} + +type epInfo struct { + address string + serverID string +} + +// ovsdbClient is an OVSDB client +type ovsdbClient struct { + options *options + metrics metrics + connected bool + rpcClient *rpc2.Client + rpcMutex sync.RWMutex + // endpoints contains all possible endpoints; the first element is + // the active endpoint if connected=true + endpoints []*epInfo + + // The name of the "primary" database - that is to say, the DB + // that the user expects to interact with. + primaryDBName string + databases map[string]*database + + errorCh chan error + stopCh chan struct{} + disconnect chan struct{} + shutdown bool + shutdownMutex sync.Mutex + + handlerShutdown *sync.WaitGroup + + trafficSeen chan struct{} + + logger *logr.Logger +} + +// database is everything needed to map between go types and an ovsdb Database +type database struct { + // model encapsulates the database schema and model of the database we're connecting to + model model.DatabaseModel + // modelMutex protects model from being replaced (via reconnect) while in use + modelMutex sync.RWMutex + + // cache is used to store the updates for monitored tables + cache *cache.TableCache + // cacheMutex protects cache from being replaced (via reconnect) while in use + cacheMutex sync.RWMutex + + api API + + // any ongoing monitors, so we can re-create them if we disconnect + monitors map[string]*Monitor + monitorsMutex sync.Mutex + + // tracks any outstanding updates while waiting for a monitor response + deferUpdates bool + deferredUpdates []*bufferedUpdate +} + +// NewOVSDBClient creates a new OVSDB Client with the provided +// database model. The client can be configured using one or more Option(s), +// like WithTLSConfig. 
If no WithEndpoint option is supplied, the default of +// unix:/var/run/openvswitch/ovsdb.sock is used +func NewOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (Client, error) { + return newOVSDBClient(clientDBModel, opts...) +} + +// newOVSDBClient creates a new ovsdbClient +func newOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (*ovsdbClient, error) { + ovs := &ovsdbClient{ + primaryDBName: clientDBModel.Name(), + databases: map[string]*database{ + clientDBModel.Name(): { + model: model.NewPartialDatabaseModel(clientDBModel), + monitors: make(map[string]*Monitor), + deferUpdates: true, + deferredUpdates: make([]*bufferedUpdate, 0), + }, + }, + errorCh: make(chan error), + handlerShutdown: &sync.WaitGroup{}, + disconnect: make(chan struct{}), + } + var err error + ovs.options, err = newOptions(opts...) + if err != nil { + return nil, err + } + for _, address := range ovs.options.endpoints { + ovs.endpoints = append(ovs.endpoints, &epInfo{address: address}) + } + + if ovs.options.logger == nil { + // If no logger is provided, use a Discard logger + logger := logr.Discard() + ovs.logger = &logger + } else { + // add the "database" value to the structured logger + // to make it easier to tell between different DBs (e.g. ovn nbdb vs. 
sbdb) + l := ovs.options.logger.WithValues( + "database", ovs.primaryDBName, + ) + ovs.logger = &l + } + ovs.metrics.init(clientDBModel.Name(), ovs.options.metricNamespace, ovs.options.metricSubsystem) + ovs.registerMetrics() + + // if we should only connect to the leader, then add the special "_Server" database as well + if ovs.options.leaderOnly { + sm, err := serverdb.FullDatabaseModel() + if err != nil { + return nil, fmt.Errorf("could not initialize model _Server: %w", err) + } + ovs.databases[serverDB] = &database{ + model: model.NewPartialDatabaseModel(sm), + monitors: make(map[string]*Monitor), + } + } + + return ovs, nil +} + +// Connect opens a connection to an OVSDB Server using the +// endpoint provided when the Client was created. +// The connection can be configured using one or more Option(s), like WithTLSConfig +// If no WithEndpoint option is supplied, the default of unix:/var/run/openvswitch/ovsdb.sock is used +func (o *ovsdbClient) Connect(ctx context.Context) error { + if err := o.connect(ctx, false); err != nil { + if err == ErrAlreadyConnected { + return nil + } + return err + } + if o.options.leaderOnly { + if err := o.watchForLeaderChange(); err != nil { + return err + } + } + return nil +} + +// moveEndpointFirst makes the endpoint requested by active the first element +// in the endpoints slice, indicating it is the active endpoint +func (o *ovsdbClient) moveEndpointFirst(i int) { + firstEp := o.endpoints[i] + othereps := append(o.endpoints[:i], o.endpoints[i+1:]...) + o.endpoints = append([]*epInfo{firstEp}, othereps...) +} + +// moveEndpointLast moves the requested endpoint to the end of the list +func (o *ovsdbClient) moveEndpointLast(i int) { + lastEp := o.endpoints[i] + othereps := append(o.endpoints[:i], o.endpoints[i+1:]...) 
+ o.endpoints = append(othereps, lastEp) +} + +func (o *ovsdbClient) resetRPCClient() { + if o.rpcClient != nil { + o.rpcClient.Close() + o.rpcClient = nil + } +} + +func (o *ovsdbClient) connect(ctx context.Context, reconnect bool) error { + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + if o.rpcClient != nil { + return ErrAlreadyConnected + } + + connected := false + connectErrors := []error{} + for i, endpoint := range o.endpoints { + u, err := url.Parse(endpoint.address) + if err != nil { + return err + } + if sid, err := o.tryEndpoint(ctx, u); err != nil { + o.resetRPCClient() + connectErrors = append(connectErrors, + fmt.Errorf("failed to connect to %s: %w", endpoint.address, err)) + } else { + o.logger.V(3).Info("successfully connected", "endpoint", endpoint.address, "sid", sid) + endpoint.serverID = sid + o.moveEndpointFirst(i) + connected = true + break + } + } + + if !connected { + if len(connectErrors) == 1 { + return connectErrors[0] + } + var combined []string + for _, e := range connectErrors { + combined = append(combined, e.Error()) + } + + return fmt.Errorf("unable to connect to any endpoints: %s", strings.Join(combined, ". 
")) + } + + // if we're reconnecting, re-start all the monitors + if reconnect { + o.logger.V(3).Info("reconnected - restarting monitors") + for dbName, db := range o.databases { + db.monitorsMutex.Lock() + + // Purge entire cache if no monitors exist to update dynamically + if len(db.monitors) == 0 { + db.cache.Purge(db.model) + db.monitorsMutex.Unlock() + continue + } + + // Restart all monitors; each monitor will handle purging + // the cache if necessary + for id, request := range db.monitors { + err := o.monitor(ctx, MonitorCookie{DatabaseName: dbName, ID: id}, true, request) + if err != nil { + o.resetRPCClient() + db.monitorsMutex.Unlock() + return err + } + } + db.monitorsMutex.Unlock() + } + } + + go o.handleDisconnectNotification() + if o.options.inactivityTimeout > 0 { + o.handlerShutdown.Add(1) + go o.handleInactivityProbes() + } + for _, db := range o.databases { + o.handlerShutdown.Add(1) + eventStopChan := make(chan struct{}) + go o.handleClientErrors(eventStopChan) + o.handlerShutdown.Add(1) + go func(db *database) { + defer o.handlerShutdown.Done() + db.cache.Run(o.stopCh) + close(eventStopChan) + }(db) + } + + o.connected = true + return nil +} + +// tryEndpoint connects to a single database endpoint. Returns the +// server ID (if clustered) on success, or an error. 
+func (o *ovsdbClient) tryEndpoint(ctx context.Context, u *url.URL) (string, error) { + o.logger.V(3).Info("trying to connect", "endpoint", fmt.Sprintf("%v", u)) + var dialer net.Dialer + var err error + var c net.Conn + + switch u.Scheme { + case UNIX: + c, err = dialer.DialContext(ctx, u.Scheme, u.Path) + case TCP: + c, err = dialer.DialContext(ctx, u.Scheme, u.Opaque) + case SSL: + dialer := tls.Dialer{ + Config: o.options.tlsConfig, + } + c, err = dialer.DialContext(ctx, "tcp", u.Opaque) + default: + err = fmt.Errorf("unknown network protocol %s", u.Scheme) + } + if err != nil { + return "", fmt.Errorf("failed to open connection: %w", err) + } + + o.createRPC2Client(c) + + serverDBNames, err := o.listDbs(ctx) + if err != nil { + return "", err + } + + // for every requested database, ensure the DB exists in the server and + // that the schema matches what we expect. + for dbName, db := range o.databases { + // check the server has what we want + found := false + for _, name := range serverDBNames { + if name == dbName { + found = true + break + } + } + if !found { + return "", fmt.Errorf("target database %s not found", dbName) + } + + // load and validate the schema + schema, err := o.getSchema(ctx, dbName) + if err != nil { + return "", err + } + + db.modelMutex.Lock() + var errors []error + db.model, errors = model.NewDatabaseModel(schema, db.model.Client()) + db.modelMutex.Unlock() + if len(errors) > 0 { + var combined []string + for _, err := range errors { + combined = append(combined, err.Error()) + } + return "", fmt.Errorf("database %s validation error (%d): %s", + dbName, len(errors), strings.Join(combined, ". 
")) + } + + db.cacheMutex.Lock() + if db.cache == nil { + db.cache, err = cache.NewTableCache(db.model, nil, o.logger) + if err != nil { + db.cacheMutex.Unlock() + return "", err + } + dbNameForWait := dbName + dbForWait := db + db.api = newAPI(db.cache, o.logger, o.options.validateModel, func(ctx context.Context) func() { + waitForCacheConsistent(ctx, dbForWait, o.logger, dbNameForWait) + return dbForWait.cacheMutex.RUnlock + }) + } + db.cacheMutex.Unlock() + } + + // check that this is the leader + var sid string + if o.options.leaderOnly { + var leader bool + leader, sid, err = o.isEndpointLeader(ctx) + if err != nil { + return "", err + } + if !leader { + return "", fmt.Errorf("endpoint is not leader") + } + } + return sid, nil +} + +// createRPC2Client creates an rpcClient using the provided connection +// It is also responsible for setting up go routines for client-side event handling +// Should only be called when the mutex is held +func (o *ovsdbClient) createRPC2Client(conn net.Conn) { + o.stopCh = make(chan struct{}) + if o.options.inactivityTimeout > 0 { + o.trafficSeen = make(chan struct{}) + } + o.rpcClient = rpc2.NewClientWithCodec(jsonrpc.NewJSONCodec(conn)) + o.rpcClient.SetBlocking(true) + o.rpcClient.Handle("echo", func(_ *rpc2.Client, args []any, reply *[]any) error { + return o.echo(args, reply) + }) + o.rpcClient.Handle("update", func(_ *rpc2.Client, args []json.RawMessage, reply *[]any) error { + return o.update(args, reply) + }) + o.rpcClient.Handle("update2", func(_ *rpc2.Client, args []json.RawMessage, reply *[]any) error { + return o.update2(args, reply) + }) + o.rpcClient.Handle("update3", func(_ *rpc2.Client, args []json.RawMessage, reply *[]any) error { + return o.update3(args, reply) + }) + go o.rpcClient.Run() +} + +// isEndpointLeader returns true if the currently connected endpoint is leader, +// otherwise false or an error. 
If the currently connected endpoint is the leader +// and the database is clustered, also returns the database's Server ID. +// Assumes rpcMutex is held. +func (o *ovsdbClient) isEndpointLeader(ctx context.Context) (bool, string, error) { + op := ovsdb.Operation{ + Op: ovsdb.OperationSelect, + Table: "Database", + Columns: []string{"name", "model", "leader", "sid"}, + } + results, err := o.transact(ctx, serverDB, true, op) + if err != nil { + return false, "", fmt.Errorf("could not check if server was leader: %w", err) + } + // for now, if no rows are returned, just accept this server + if len(results) != 1 { + return true, "", nil + } + result := results[0] + if len(result.Rows) == 0 { + return true, "", nil + } + + for _, row := range result.Rows { + dbName, ok := row["name"].(string) + if !ok { + return false, "", fmt.Errorf("could not parse name") + } + if dbName != o.primaryDBName { + continue + } + + model, ok := row["model"].(string) + if !ok { + return false, "", fmt.Errorf("could not parse model") + } + + // the database reports whether or not it is part of a cluster via the + // "model" column. If it's not clustered, it is by definition leader. + if model != serverdb.DatabaseModelClustered { + return true, "", nil + } + + // Clustered database must have a Server ID + sid, ok := row["sid"].(ovsdb.UUID) + if !ok { + return false, "", fmt.Errorf("could not parse server id") + } + + leader, ok := row["leader"].(bool) + if !ok { + return false, "", fmt.Errorf("could not parse leader") + } + + return leader, sid.GoUUID, nil + } + + // Extremely unlikely: there is no _Server row for the desired DB (which we made sure existed) + // for now, just continue + o.logger.V(3).Info("Couldn't find a row in _Server for our database. 
Continuing without leader detection", "database", o.primaryDBName) + return true, "", nil +} + +func (o *ovsdbClient) primaryDB() *database { + return o.databases[o.primaryDBName] +} + +// Schema returns the DatabaseSchema that is being used by the client +// it will be nil until a connection has been established +func (o *ovsdbClient) Schema() ovsdb.DatabaseSchema { + db := o.primaryDB() + db.modelMutex.RLock() + defer db.modelMutex.RUnlock() + return db.model.Schema +} + +// Cache returns the TableCache that is populated from +// ovsdb update notifications. It will be nil until a connection +// has been established, and empty unless you call Monitor +func (o *ovsdbClient) Cache() *cache.TableCache { + db := o.primaryDB() + db.cacheMutex.RLock() + defer db.cacheMutex.RUnlock() + return db.cache +} + +// UpdateEndpoints sets client endpoints +// It is intended to be called at runtime +func (o *ovsdbClient) UpdateEndpoints(endpoints []string) { + o.logger.V(3).Info("update endpoints", "endpoints", endpoints) + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + if len(endpoints) == 0 { + endpoints = []string{defaultUnixEndpoint} + } + o.options.endpoints = endpoints + originEps := o.endpoints[:] + var newEps []*epInfo + activeIdx := -1 + for i, address := range o.options.endpoints { + var serverID string + for j, origin := range originEps { + if address == origin.address { + if j == 0 { + activeIdx = i + } + serverID = origin.serverID + break + } + } + newEps = append(newEps, &epInfo{address: address, serverID: serverID}) + } + o.endpoints = newEps + if activeIdx > 0 { + o.moveEndpointFirst(activeIdx) + } else if activeIdx == -1 { + o._disconnect() + } +} + +// SetOption sets a new value for an option. 
+// It may only be called when the client is not connected +func (o *ovsdbClient) SetOption(opt Option) error { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + if o.rpcClient != nil { + return fmt.Errorf("cannot set option when client is connected") + } + return opt(o.options) +} + +// Connected returns whether or not the client is currently connected to the server +func (o *ovsdbClient) Connected() bool { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + return o.connected +} + +func (o *ovsdbClient) CurrentEndpoint() string { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + if o.rpcClient == nil { + return "" + } + return o.endpoints[0].address +} + +// DisconnectNotify returns a channel which will notify the caller when the +// server has disconnected +func (o *ovsdbClient) DisconnectNotify() chan struct{} { + return o.disconnect +} + +// RFC 7047 : Section 4.1.6 : Echo +func (o *ovsdbClient) echo(args []any, reply *[]any) error { + *reply = args + return nil +} + +// RFC 7047 : Update Notification Section 4.1.6 +// params is an array of length 2: [json-value, table-updates] +// - json-value: the arbitrary json-value passed when creating the Monitor, i.e. the "cookie" +// - table-updates: map of table name to table-update. 
Table-update is a map of uuid to (old, new) row paris +func (o *ovsdbClient) update(params []json.RawMessage, reply *[]any) error { + cookie := MonitorCookie{} + *reply = []any{} + if len(params) > 2 { + return fmt.Errorf("update requires exactly 2 args") + } + err := json.Unmarshal(params[0], &cookie) + if err != nil { + return err + } + var updates ovsdb.TableUpdates + err = json.Unmarshal(params[1], &updates) + if err != nil { + return err + } + db := o.databases[cookie.DatabaseName] + if db == nil { + return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) + } + o.metrics.numUpdates.WithLabelValues(cookie.DatabaseName).Inc() + for tableName := range updates { + o.metrics.numTableUpdates.WithLabelValues(cookie.DatabaseName, tableName).Inc() + } + + db.cacheMutex.Lock() + if db.deferUpdates { + db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{&updates, nil, ""}) + db.cacheMutex.Unlock() + return nil + } + db.cacheMutex.Unlock() + + // Update the local DB cache with the tableUpdates + db.cacheMutex.RLock() + err = db.cache.Update(cookie.ID, updates) + db.cacheMutex.RUnlock() + + if err != nil { + o.errorCh <- err + } + + return err +} + +// update2 handling from ovsdb-server.7 +func (o *ovsdbClient) update2(params []json.RawMessage, reply *[]any) error { + cookie := MonitorCookie{} + *reply = []any{} + if len(params) > 2 { + return fmt.Errorf("update2 requires exactly 2 args") + } + err := json.Unmarshal(params[0], &cookie) + if err != nil { + return err + } + var updates ovsdb.TableUpdates2 + err = json.Unmarshal(params[1], &updates) + if err != nil { + return err + } + db := o.databases[cookie.DatabaseName] + if db == nil { + return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) + } + + db.cacheMutex.Lock() + if db.deferUpdates { + db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, ""}) + db.cacheMutex.Unlock() + return nil + } + db.cacheMutex.Unlock() + + // 
Update the local DB cache with the tableUpdates + db.cacheMutex.RLock() + err = db.cache.Update2(cookie, updates) + db.cacheMutex.RUnlock() + + if err != nil { + o.errorCh <- err + } + + return err +} + +// update3 handling from ovsdb-server.7 +func (o *ovsdbClient) update3(params []json.RawMessage, reply *[]any) error { + cookie := MonitorCookie{} + *reply = []any{} + if len(params) > 3 { + return fmt.Errorf("update requires exactly 3 args") + } + err := json.Unmarshal(params[0], &cookie) + if err != nil { + return err + } + var lastTransactionID string + err = json.Unmarshal(params[1], &lastTransactionID) + if err != nil { + return err + } + var updates ovsdb.TableUpdates2 + err = json.Unmarshal(params[2], &updates) + if err != nil { + return err + } + + db := o.databases[cookie.DatabaseName] + if db == nil { + return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) + } + + db.cacheMutex.Lock() + if db.deferUpdates { + db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, lastTransactionID}) + db.cacheMutex.Unlock() + return nil + } + db.cacheMutex.Unlock() + + // Update the local DB cache with the tableUpdates + db.cacheMutex.RLock() + err = db.cache.Update2(cookie, updates) + db.cacheMutex.RUnlock() + + if err == nil { + db.monitorsMutex.Lock() + mon := db.monitors[cookie.ID] + mon.LastTransactionID = lastTransactionID + db.monitorsMutex.Unlock() + } + + return err +} + +// getSchema returns the schema in use for the provided database name +// RFC 7047 : get_schema +// Should only be called when mutex is held +func (o *ovsdbClient) getSchema(ctx context.Context, dbName string) (ovsdb.DatabaseSchema, error) { + args := ovsdb.NewGetSchemaArgs(dbName) + var reply ovsdb.DatabaseSchema + err := o.rpcClient.CallWithContext(ctx, "get_schema", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return ovsdb.DatabaseSchema{}, ErrNotConnected + } + return ovsdb.DatabaseSchema{}, err + } + return reply, err 
+} + +// listDbs returns the list of databases on the server +// RFC 7047 : list_dbs +// Should only be called when mutex is held +func (o *ovsdbClient) listDbs(ctx context.Context) ([]string, error) { + var dbs []string + err := o.rpcClient.CallWithContext(ctx, "list_dbs", nil, &dbs) + if err != nil { + if err == rpc2.ErrShutdown { + return nil, ErrNotConnected + } + return nil, fmt.Errorf("listdbs failure - %v", err) + } + return dbs, err +} + +// logFromContext returns a Logger from ctx or return the default logger +func (o *ovsdbClient) logFromContext(ctx context.Context) *logr.Logger { + if logger, err := logr.FromContext(ctx); err == nil { + return &logger + } + return o.logger +} + +// Transact performs the provided Operations on the database +// RFC 7047 : transact +func (o *ovsdbClient) Transact(ctx context.Context, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) { + logger := o.logFromContext(ctx) + o.rpcMutex.RLock() + if o.rpcClient == nil || !o.connected { + o.rpcMutex.RUnlock() + if o.options.reconnect { + logger.V(5).Info("blocking transaction until reconnected", "operations", + fmt.Sprintf("%+v", operation)) + ticker := time.NewTicker(50 * time.Millisecond) + defer ticker.Stop() + ReconnectWaitLoop: + for { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("%w: while awaiting reconnection", ctx.Err()) + case <-ticker.C: + o.rpcMutex.RLock() + if o.rpcClient != nil && o.connected { + break ReconnectWaitLoop + } + o.rpcMutex.RUnlock() + } + } + } else { + return nil, ErrNotConnected + } + } + defer o.rpcMutex.RUnlock() + return o.transact(ctx, o.primaryDBName, false, operation...) 
+} + +func (o *ovsdbClient) transact(ctx context.Context, dbName string, skipChWrite bool, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) { + logger := o.logFromContext(ctx) + var reply []ovsdb.OperationResult + db := o.databases[dbName] + db.modelMutex.RLock() + schema := o.databases[dbName].model.Schema + db.modelMutex.RUnlock() + if reflect.DeepEqual(schema, ovsdb.DatabaseSchema{}) { + return nil, fmt.Errorf("cannot transact to database %s: schema unknown", dbName) + } + if ok := schema.ValidateOperations(operation...); !ok { + return nil, fmt.Errorf("validation failed for the operation") + } + + args := ovsdb.NewTransactArgs(dbName, operation...) + if o.rpcClient == nil { + return nil, ErrNotConnected + } + dbgLogger := logger.WithValues("database", dbName).V(4) + if dbgLogger.Enabled() { + dbgLogger.Info("transacting operations", "operations", fmt.Sprintf("%+v", operation)) + } + err := o.rpcClient.CallWithContext(ctx, "transact", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return nil, ErrNotConnected + } + return nil, err + } + + if !skipChWrite && o.trafficSeen != nil { + select { + case o.trafficSeen <- struct{}{}: + default: + // If the channel is full, drop the message + } + } + return reply, nil +} + +// MonitorAll is a convenience method to monitor every table/column +func (o *ovsdbClient) MonitorAll(ctx context.Context) (MonitorCookie, error) { + m := newMonitor() + for name := range o.primaryDB().model.Types() { + m.Tables = append(m.Tables, TableMonitor{Table: name}) + } + return o.Monitor(ctx, m) +} + +// MonitorCancel will request cancel a previously issued monitor request +// RFC 7047 : monitor_cancel +func (o *ovsdbClient) MonitorCancel(ctx context.Context, cookie MonitorCookie) error { + var reply ovsdb.OperationResult + args := ovsdb.NewMonitorCancelArgs(cookie) + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + if o.rpcClient == nil { + return ErrNotConnected + } + err := o.rpcClient.CallWithContext(ctx, 
"monitor_cancel", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return ErrNotConnected + } + return err + } + if reply.Error != "" { + return fmt.Errorf("error while executing transaction: %s", reply.Error) + } + o.primaryDB().monitorsMutex.Lock() + defer o.primaryDB().monitorsMutex.Unlock() + delete(o.primaryDB().monitors, cookie.ID) + o.metrics.numMonitors.Dec() + return nil +} + +// Monitor will provide updates for a given table/column +// and populate the cache with them. Subsequent updates will be processed +// by the Update Notifications +// RFC 7047 : monitor +func (o *ovsdbClient) Monitor(ctx context.Context, monitor *Monitor) (MonitorCookie, error) { + cookie := newMonitorCookie(o.primaryDBName) + db := o.databases[o.primaryDBName] + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + return cookie, o.monitor(ctx, cookie, false, monitor) +} + +// If fields is provided, the request will be constrained to the provided columns +// If no fields are provided, all columns will be used +func newMonitorRequest(data *mapper.Info, fields []string, conditions []ovsdb.Condition) (*ovsdb.MonitorRequest, error) { + var columns []string + if len(fields) > 0 { + columns = append(columns, fields...) 
+ } else { + for c := range data.Metadata.TableSchema.Columns { + columns = append(columns, c) + } + } + return &ovsdb.MonitorRequest{Columns: columns, Where: conditions, Select: ovsdb.NewDefaultMonitorSelect()}, nil +} + +// monitor must only be called with a lock on monitorsMutex +// +//gocyclo:ignore +func (o *ovsdbClient) monitor(ctx context.Context, cookie MonitorCookie, reconnecting bool, monitor *Monitor) error { + // if we're reconnecting, we already hold the rpcMutex + if !reconnecting { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + } + if o.rpcClient == nil { + return ErrNotConnected + } + if len(monitor.Errors) != 0 { + var errString []string + for _, err := range monitor.Errors { + errString = append(errString, err.Error()) + } + return errors.New(strings.Join(errString, ". ")) + } + if len(monitor.Tables) == 0 { + return errors.New("at least one table should be monitored") + } + dbName := cookie.DatabaseName + db := o.databases[dbName] + db.modelMutex.RLock() + typeMap := db.model.Types() + requests := make(map[string]ovsdb.MonitorRequest) + for _, o := range monitor.Tables { + _, ok := typeMap[o.Table] + if !ok { + return fmt.Errorf("type for table %s does not exist in model", o.Table) + } + model, err := db.model.NewModel(o.Table) + if err != nil { + return err + } + info, err := db.model.NewModelInfo(model) + if err != nil { + return err + } + request, err := newMonitorRequest(info, o.Fields, o.Conditions) + if err != nil { + return err + } + requests[o.Table] = *request + } + db.modelMutex.RUnlock() + + var args []any + if monitor.Method == ovsdb.ConditionalMonitorSinceRPC { + // If we are reconnecting a CondSince monitor that is the only + // monitor, then we can use its LastTransactionID since it is + // valid (because we're reconnecting) and we can safely keep + // the cache intact (because it's the only monitor). 
+ transactionID := emptyUUID + if reconnecting && len(db.monitors) == 1 { + transactionID = monitor.LastTransactionID + } + args = ovsdb.NewMonitorCondSinceArgs(dbName, cookie, requests, transactionID) + } else { + args = ovsdb.NewMonitorArgs(dbName, cookie, requests) + } + var err error + var tableUpdates any + + var lastTransactionFound bool + switch monitor.Method { + case ovsdb.MonitorRPC: + var reply ovsdb.TableUpdates + err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) + tableUpdates = reply + case ovsdb.ConditionalMonitorRPC: + var reply ovsdb.TableUpdates2 + err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) + tableUpdates = reply + case ovsdb.ConditionalMonitorSinceRPC: + var reply ovsdb.MonitorCondSinceReply + err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) + if err == nil && reply.Found { + monitor.LastTransactionID = reply.LastTransactionID + lastTransactionFound = true + } + tableUpdates = reply.Updates + default: + return fmt.Errorf("unsupported monitor method: %v", monitor.Method) + } + + if err != nil { + if err == rpc2.ErrShutdown { + return ErrNotConnected + } + if err.Error() == "unknown method" { + if monitor.Method == ovsdb.ConditionalMonitorSinceRPC { + o.logger.V(3).Error(err, "method monitor_cond_since not supported, falling back to monitor_cond") + monitor.Method = ovsdb.ConditionalMonitorRPC + return o.monitor(ctx, cookie, reconnecting, monitor) + } + if monitor.Method == ovsdb.ConditionalMonitorRPC { + o.logger.V(3).Error(err, "method monitor_cond not supported, falling back to monitor") + monitor.Method = ovsdb.MonitorRPC + return o.monitor(ctx, cookie, reconnecting, monitor) + } + } + return err + } + + if !reconnecting { + db.monitors[cookie.ID] = monitor + o.metrics.numMonitors.Inc() + } + + db.cacheMutex.Lock() + defer db.cacheMutex.Unlock() + + // On reconnect, purge the cache _unless_ the only monitor is a + // MonitorCondSince one, whose LastTransactionID was known to the + 
// server. In this case the reply contains only updates to the existing + // cache data, while otherwise it includes complete DB data so we must + // purge to get rid of old rows. + if reconnecting && (len(db.monitors) > 1 || !lastTransactionFound) { + db.cache.Purge(db.model) + } + + if monitor.Method == ovsdb.MonitorRPC { + u := tableUpdates.(ovsdb.TableUpdates) + err = db.cache.Populate(u) + } else { + u := tableUpdates.(ovsdb.TableUpdates2) + err = db.cache.Populate2(u) + } + + if err != nil { + return err + } + + // populate any deferred updates + db.deferUpdates = false + for _, update := range db.deferredUpdates { + if update.updates != nil { + if err = db.cache.Populate(*update.updates); err != nil { + return err + } + } + + if update.updates2 != nil { + if err = db.cache.Populate2(*update.updates2); err != nil { + return err + } + } + if len(update.lastTxnID) > 0 { + db.monitors[cookie.ID].LastTransactionID = update.lastTxnID + } + } + // clear deferred updates for next time + db.deferredUpdates = make([]*bufferedUpdate, 0) + + return err +} + +// Echo tests the liveness of the OVSDB connetion +func (o *ovsdbClient) Echo(ctx context.Context) error { + args := ovsdb.NewEchoArgs() + var reply []any + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + if o.rpcClient == nil { + return ErrNotConnected + } + err := o.rpcClient.CallWithContext(ctx, "echo", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return ErrNotConnected + } + return err + } + if !reflect.DeepEqual(args, reply) { + return fmt.Errorf("incorrect server response: %v, %v", args, reply) + } + return nil +} + +// watchForLeaderChange will trigger a reconnect if the connected endpoint +// ever loses leadership +func (o *ovsdbClient) watchForLeaderChange() error { + updates := make(chan model.Model) + o.databases[serverDB].cache.AddEventHandler(&cache.EventHandlerFuncs{ + UpdateFunc: func(table string, _, n model.Model) { + if table == "Database" { + updates <- n + } + }, + }) + + m 
:= newMonitor() + // NOTE: _Server does not support monitor_cond_since + m.Method = ovsdb.ConditionalMonitorRPC + m.Tables = []TableMonitor{{Table: "Database"}} + db := o.databases[serverDB] + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + err := o.monitor(context.Background(), newMonitorCookie(serverDB), false, m) + if err != nil { + return err + } + + go func() { + for m := range updates { + dbInfo, ok := m.(*serverdb.Database) + if !ok { + continue + } + + // Ignore the dbInfo for _Server + if dbInfo.Name != o.primaryDBName { + continue + } + + // Only handle leadership changes for clustered databases + if dbInfo.Model != serverdb.DatabaseModelClustered { + continue + } + + // Clustered database servers must have a valid Server ID + var sid string + if dbInfo.Sid != nil { + sid = *dbInfo.Sid + } + if sid == "" { + o.logger.V(3).Info("clustered database update contained invalid server ID") + continue + } + + o.rpcMutex.Lock() + if !dbInfo.Leader && o.connected { + activeEndpoint := o.endpoints[0] + if sid == activeEndpoint.serverID { + o.logger.V(3).Info("endpoint lost leader, reconnecting", + "endpoint", activeEndpoint.address, "sid", sid) + // don't immediately reconnect to the active endpoint since it's no longer leader + o.moveEndpointLast(0) + o._disconnect() + } else { + o.logger.V(3).Info("endpoint lost leader but had unexpected server ID", + "endpoint", activeEndpoint.address, + "expected", activeEndpoint.serverID, "found", sid) + } + } + o.rpcMutex.Unlock() + } + }() + return nil +} + +func (o *ovsdbClient) handleClientErrors(stopCh <-chan struct{}) { + defer o.handlerShutdown.Done() + var errColumnNotFound *mapper.ErrColumnNotFound + var errCacheInconsistent *cache.ErrCacheInconsistent + var errIndexExists *cache.ErrIndexExists + for { + select { + case <-stopCh: + return + case err := <-o.errorCh: + if errors.As(err, &errColumnNotFound) { + o.logger.V(3).Error(err, "error updating cache, DB schema may be newer than client!") + } else if 
errors.As(err, &errCacheInconsistent) || errors.As(err, &errIndexExists) { + // trigger a reconnect, which will purge the cache + // hopefully a rebuild will fix any inconsistency + o.logger.V(3).Error(err, "triggering reconnect to rebuild cache") + // for rebuilding cache with mon_cond_since (not yet fully supported in libovsdb) we + // need to reset the last txn ID + for _, db := range o.databases { + db.monitorsMutex.Lock() + for _, mon := range db.monitors { + mon.LastTransactionID = emptyUUID + } + db.monitorsMutex.Unlock() + } + o.Disconnect() + } else { + o.logger.V(3).Error(err, "error updating cache") + } + } + } +} + +func (o *ovsdbClient) handleInactivityProbes() { + defer o.handlerShutdown.Done() + stopCh := o.stopCh + trafficSeen := o.trafficSeen + timer := time.NewTimer(o.options.inactivityTimeout) + for { + select { + case <-stopCh: + timer.Stop() + return + case <-trafficSeen: + // We got some traffic from the server + // Timer must be stopped and drained of stale values before resetting it + // See: https://pkg.go.dev/time#NewTimer + if !timer.Stop() { + <-timer.C + } + case <-timer.C: + // We timed out, send an echo request + ctx, cancel := context.WithTimeout(context.Background(), o.options.inactivityTimeout) + err := o.Echo(ctx) + if err != nil { + o.logger.V(3).Error(err, "server echo reply error") + o.Disconnect() + } + cancel() + } + timer.Reset(o.options.inactivityTimeout) + } +} + +func (o *ovsdbClient) handleDisconnectNotification() { + <-o.rpcClient.DisconnectNotify() + // close the stopCh, which will stop the cache event processor + close(o.stopCh) + if o.trafficSeen != nil { + close(o.trafficSeen) + } + o.metrics.numDisconnects.Inc() + // wait for client related handlers to shutdown + o.handlerShutdown.Wait() + o.rpcMutex.Lock() + if o.options.reconnect && !o.shutdown { + o.rpcClient = nil + o.rpcMutex.Unlock() + suppressionCounter := 1 + connect := func() error { + // need to ensure deferredUpdates is cleared on every reconnect attempt 
+ for _, db := range o.databases { + db.cacheMutex.Lock() + db.deferredUpdates = make([]*bufferedUpdate, 0) + db.deferUpdates = true + db.cacheMutex.Unlock() + } + ctx, cancel := context.WithTimeout(context.Background(), o.options.timeout) + defer cancel() + err := o.connect(ctx, true) + if err != nil { + if suppressionCounter < 5 { + o.logger.V(2).Error(err, "failed to reconnect") + } else if suppressionCounter == 5 { + o.logger.V(2).Error(err, "reconnect has failed 5 times, suppressing logging "+ + "for future attempts") + } + } + suppressionCounter++ + return err + } + o.logger.V(3).Info("connection lost, reconnecting", "endpoint", o.endpoints[0].address) + err := backoff.Retry(connect, o.options.backoff) + if err != nil { + // TODO: We should look at passing this back to the + // caller to handle + panic(err) + } + // this goroutine finishes, and is replaced with a new one (from Connect) + return + } + + // clear connection state + o.rpcClient = nil + o.rpcMutex.Unlock() + + for _, db := range o.databases { + db.cacheMutex.Lock() + defer db.cacheMutex.Unlock() + db.cache = nil + // need to defer updates if/when we reconnect and clear any stale updates + db.deferUpdates = true + db.deferredUpdates = make([]*bufferedUpdate, 0) + + db.modelMutex.Lock() + defer db.modelMutex.Unlock() + db.model = model.NewPartialDatabaseModel(db.model.Client()) + + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + db.monitors = make(map[string]*Monitor) + } + o.metrics.numMonitors.Set(0) + + o.shutdownMutex.Lock() + defer o.shutdownMutex.Unlock() + o.shutdown = false + + select { + case o.disconnect <- struct{}{}: + // sent disconnect notification to client + default: + // client is not listening to the channel + } +} + +// _disconnect will close the connection to the OVSDB server +// If the client was created with WithReconnect then the client +// will reconnect afterwards. Assumes rpcMutex is held. 
+func (o *ovsdbClient) _disconnect() { + o.connected = false + if o.rpcClient == nil { + return + } + o.rpcClient.Close() +} + +// Disconnect will close the connection to the OVSDB server +// If the client was created with WithReconnect then the client +// will reconnect afterwards +func (o *ovsdbClient) Disconnect() { + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + o._disconnect() +} + +// Close will close the connection to the OVSDB server +// It will remove all stored state ready for the next connection +// Even If the client was created with WithReconnect it will not reconnect afterwards +func (o *ovsdbClient) Close() { + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + o.connected = false + if o.rpcClient == nil { + return + } + o.shutdownMutex.Lock() + defer o.shutdownMutex.Unlock() + o.shutdown = true + o.rpcClient.Close() +} + +// Ensures the cache is consistent by evaluating that the client is connected +// and the monitor is fully setup, with the cache populated. Caller must hold +// the database's cache mutex for reading. +func isCacheConsistent(db *database) bool { + // This works because when a client is disconnected the deferUpdates variable + // will be set to true. deferUpdates is also protected by the db.cacheMutex. + // When the client reconnects and then re-establishes the monitor; the final step + // is to process all deferred updates, set deferUpdates back to false, and unlock cacheMutex + return !db.deferUpdates +} + +// best effort to ensure cache is in a good state for reading. RLocks the +// database's cache before returning; caller must always unlock. 
+func waitForCacheConsistent(ctx context.Context, db *database, logger *logr.Logger, dbName string) { + if !hasMonitors(db) { + db.cacheMutex.RLock() + return + } + // Check immediately as a fastpath + db.cacheMutex.RLock() + if isCacheConsistent(db) { + return + } + db.cacheMutex.RUnlock() + + ticker := time.NewTicker(50 * time.Millisecond) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + logger.V(3).Info("warning: unable to ensure cache consistency for reading", + "database", dbName) + db.cacheMutex.RLock() + return + case <-ticker.C: + db.cacheMutex.RLock() + if isCacheConsistent(db) { + return + } + db.cacheMutex.RUnlock() + } + } +} + +func hasMonitors(db *database) bool { + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + return len(db.monitors) > 0 +} + +// Client API interface wrapper functions +// We add this wrapper to allow users to access the API directly on the +// client object + +// Get implements the API interface's Get function +func (o *ovsdbClient) Get(ctx context.Context, model model.Model) error { + return o.primaryDB().api.Get(ctx, model) +} + +// Create implements the API interface's Create function +func (o *ovsdbClient) Create(models ...model.Model) ([]ovsdb.Operation, error) { + return o.primaryDB().api.Create(models...) +} + +// List implements the API interface's List function +func (o *ovsdbClient) List(ctx context.Context, result any) error { + return o.primaryDB().api.List(ctx, result) +} + +// Where implements the API interface's Where function +func (o *ovsdbClient) Where(models ...model.Model) ConditionalAPI { + return o.primaryDB().api.Where(models...) +} + +// WhereAny implements the API interface's WhereAny function +func (o *ovsdbClient) WhereAny(m model.Model, conditions ...model.Condition) ConditionalAPI { + return o.primaryDB().api.WhereAny(m, conditions...) 
+} + +// WhereAll implements the API interface's WhereAll function +func (o *ovsdbClient) WhereAll(m model.Model, conditions ...model.Condition) ConditionalAPI { + return o.primaryDB().api.WhereAll(m, conditions...) +} + +// WhereCache implements the API interface's WhereCache function +func (o *ovsdbClient) WhereCache(predicate any) ConditionalAPI { + return o.primaryDB().api.WhereCache(predicate) +} + +// Select implements the API interface's Select function +func (o *ovsdbClient) Select(m model.Model, fields ...any) ([]ovsdb.Operation, error) { + return o.primaryDB().api.Select(m, fields...) +} + +// GetSelectResultsByIndex parses the results of a transaction containing select operations +// and populates the target slice with the specified select query's results. +// The index parameter specifies which select query to retrieve (0-based). +// Use index=0 for single select queries (WhereAny, WhereCache, etc.). +func (o *ovsdbClient) GetSelectResultsByIndex(ops []ovsdb.Operation, results []ovsdb.OperationResult, target interface{}, index int) error { + if len(ops) != len(results) { + return fmt.Errorf("number of operations (%d) and results (%d) must match", len(ops), len(results)) + } + + // Validate target parameter + slicePtr := reflect.ValueOf(target) + if slicePtr.Type().Kind() != reflect.Ptr || slicePtr.IsNil() { + return &ErrWrongType{slicePtr.Type(), "target must be a non-nil pointer to a slice of models"} + } + + sliceVal := reflect.Indirect(slicePtr) + if sliceVal.Type().Kind() != reflect.Slice { + return &ErrWrongType{slicePtr.Type(), "target must be a pointer to a slice of models"} + } + + // GetSelectResultsByIndex only accepts a pointer to a slice of pointers to models + modelType := sliceVal.Type().Elem() + if modelType.Kind() != reflect.Ptr { + return &ErrWrongType{slicePtr.Type(), "target must be a pointer to a slice of model pointers"} + } + modelType = modelType.Elem() + + o.primaryDB().modelMutex.RLock() + dbModel := o.primaryDB().model + 
o.primaryDB().modelMutex.RUnlock() + + // Determine the target table name from the model type + dummyModel := reflect.New(modelType).Interface().(model.Model) + info, err := dbModel.NewModelInfo(dummyModel) + if err != nil { + return fmt.Errorf("failed to get model info for target type: %w", err) + } + targetTable := info.Metadata.TableName + + // Create a map to store merged rows (deduplicated by UUID) + mergedRows := make(map[string]ovsdb.Row) + mergeRows := func(result ovsdb.OperationResult) error { + if result.Error != "" { + return fmt.Errorf("operation error: %s: %s", result.Error, result.Details) + } + + for _, row := range result.Rows { + uuidVal, ok := row["_uuid"] + if !ok { + return fmt.Errorf("failed to get UUID from row: %v", row) + } + uuid, ok := uuidVal.(ovsdb.UUID) + if !ok { + return fmt.Errorf("failed to cast UUID from row: %v", row) + } + // Deduplicate by UUID - later results overwrite earlier ones + // Note different results may have different selected columns + mergedRows[uuid.GoUUID] = row + } + return nil + } + + // Single pass to find and collect results for the target index + currentIndex := -1 + var currentCorrelationID string + for i, op := range ops { + if op.Op != ovsdb.OperationSelect || op.Table != targetTable { + continue + } + + correlationID := ovsdb.GetCorrelationID(op) + if correlationID != currentCorrelationID { + currentIndex++ + currentCorrelationID = correlationID + } + + if currentIndex < index { + continue + } + if currentIndex > index { + break + } + + err := mergeRows(results[i]) + if err != nil { + return err + } + } + + if currentIndex < index { + return fmt.Errorf("index %d is out of range: found %d query groups for table '%s'", + index, currentIndex+1, targetTable) + } + + // Populate the target slice with optimized memory allocation + resultCount := len(mergedRows) + + // Pre-allocate slice with exact capacity to avoid repeated allocations + if sliceVal.IsNil() || sliceVal.Cap() < resultCount { + 
sliceVal.Set(reflect.MakeSlice(sliceVal.Type(), resultCount, resultCount)) + } else { + // Reuse existing slice but set to exact length + sliceVal.SetLen(resultCount) + } + + // Use index-based assignment to avoid append overhead + var i int + for uuid, row := range mergedRows { + model, err := model.CreateModel(dbModel, targetTable, &row, uuid) + if err != nil { + return fmt.Errorf("failed to create model: %w", err) + } + sliceVal.Index(i).Set(reflect.ValueOf(model)) + i++ + } + + return nil +} + +// GetSelectResults parses select operation results from a transaction. +// Equivalent to GetSelectResultsByIndex with index 0 (first select query) +func (o *ovsdbClient) GetSelectResults(ops []ovsdb.Operation, results []ovsdb.OperationResult, target interface{}) error { + return o.GetSelectResultsByIndex(ops, results, target, 0) +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/client/condition.go b/vendor/github.com/ovn-kubernetes/libovsdb/client/condition.go new file mode 100644 index 0000000000..2e217c78ee --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/client/condition.go @@ -0,0 +1,248 @@ +package client + +import ( + "fmt" + "reflect" + + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +// Conditional is the interface used by the ConditionalAPI to match on cache objects +// and generate ovsdb conditions +type Conditional interface { + // Generate returns a list of lists of conditions to be used in Operations + // Each element in the (outer) list corresponds to an operation + Generate() ([][]ovsdb.Condition, error) + // Returns the models that match the conditions + Matches() (map[string]model.Model, error) + // returns the table that this condition is associated with + Table() string +} + +func generateConditionsFromModels(dbModel model.DatabaseModel, models map[string]model.Model) ([][]ovsdb.Condition, 
error) { + anyConditions := make([][]ovsdb.Condition, 0, len(models)) + for _, model := range models { + info, err := dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + allConditions, err := dbModel.Mapper.NewEqualityCondition(info) + if err != nil { + return nil, err + } + anyConditions = append(anyConditions, allConditions) + } + return anyConditions, nil +} + +func generateOvsdbConditionsFromModelConditions(dbModel model.DatabaseModel, info *mapper.Info, conditions []model.Condition, singleOp bool) ([][]ovsdb.Condition, error) { + anyConditions := [][]ovsdb.Condition{} + if singleOp { + anyConditions = append(anyConditions, []ovsdb.Condition{}) + } + for _, condition := range conditions { + ovsdbCond, err := dbModel.Mapper.NewCondition(info, condition.Field, condition.Function, condition.Value) + if err != nil { + return nil, err + } + allConditions := []ovsdb.Condition{*ovsdbCond} + if singleOp { + anyConditions[0] = append(anyConditions[0], allConditions...) + } else { + anyConditions = append(anyConditions, allConditions) + } + } + return anyConditions, nil +} + +// equalityConditional uses the indexes available in a provided model to find a +// matching model in the database. +type equalityConditional struct { + tableName string + models []model.Model + cache *cache.TableCache +} + +func (c *equalityConditional) Table() string { + return c.tableName +} + +// Returns the models that match the indexes available through the provided +// model. +func (c *equalityConditional) Matches() (map[string]model.Model, error) { + tableCache := c.cache.Table(c.tableName) + if tableCache == nil { + return nil, ErrNotFound + } + return tableCache.RowsByModels(c.models) +} + +// Generate conditions based on the equality of the first available index. If +// the index can be matched against a model in the cache, the condition will be +// based on the UUID of the found model. Otherwise, the conditions will be based +// on the index. 
+func (c *equalityConditional) Generate() ([][]ovsdb.Condition, error) { + models, err := c.Matches() + if err != nil && err != ErrNotFound { + return nil, err + } + if len(models) == 0 { + // no cache hits, generate condition from models we were given + modelMap := make(map[string]model.Model, len(c.models)) + for i, m := range c.models { + // generateConditionsFromModels() ignores the map keys + // so just use the range index + modelMap[fmt.Sprintf("%d", i)] = m + } + return generateConditionsFromModels(c.cache.DatabaseModel(), modelMap) + } + return generateConditionsFromModels(c.cache.DatabaseModel(), models) +} + +// NewEqualityCondition creates a new equalityConditional +func newEqualityConditional(table string, cache *cache.TableCache, models []model.Model) (Conditional, error) { + return &equalityConditional{ + tableName: table, + models: models, + cache: cache, + }, nil +} + +// explicitConditional generates conditions based on the provided Condition list +type explicitConditional struct { + tableName string + anyConditions [][]ovsdb.Condition + cache *cache.TableCache +} + +func (c *explicitConditional) Table() string { + return c.tableName +} + +// Returns the models that match the conditions +func (c *explicitConditional) Matches() (map[string]model.Model, error) { + tableCache := c.cache.Table(c.tableName) + if tableCache == nil { + return nil, ErrNotFound + } + found := map[string]model.Model{} + for _, allConditions := range c.anyConditions { + models, err := tableCache.RowsByCondition(allConditions) + if err != nil { + return nil, err + } + for uuid, model := range models { + found[uuid] = model + } + } + return found, nil +} + +// Generate returns conditions based on the provided Condition list +func (c *explicitConditional) Generate() ([][]ovsdb.Condition, error) { + models, err := c.Matches() + if err != nil && err != ErrNotFound { + return nil, err + } + if len(models) == 0 { + // no cache hits, return conditions we were given + return 
c.anyConditions, nil + } + return generateConditionsFromModels(c.cache.DatabaseModel(), models) +} + +// newExplicitConditional creates a new explicitConditional +func newExplicitConditional(table string, cache *cache.TableCache, matchAll bool, model model.Model, cond ...model.Condition) (Conditional, error) { + dbModel := cache.DatabaseModel() + info, err := dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + anyConditions, err := generateOvsdbConditionsFromModelConditions(dbModel, info, cond, matchAll) + if err != nil { + return nil, err + } + return &explicitConditional{ + tableName: table, + anyConditions: anyConditions, + cache: cache, + }, nil +} + +// predicateConditional is a Conditional that calls a provided function pointer +// to match on models. +type predicateConditional struct { + tableName string + predicate any + cache *cache.TableCache +} + +// matches returns the result of the execution of the predicate +// Type verifications are not performed +// Returns the models that match the conditions +func (c *predicateConditional) Matches() (map[string]model.Model, error) { + tableCache := c.cache.Table(c.tableName) + if tableCache == nil { + return nil, ErrNotFound + } + found := map[string]model.Model{} + // run the predicate on a shallow copy of the models for speed and only + // clone the matches + for u, m := range tableCache.RowsShallow() { + ret := reflect.ValueOf(c.predicate).Call([]reflect.Value{reflect.ValueOf(m)}) + if ret[0].Bool() { + found[u] = model.Clone(m) + } + } + return found, nil +} + +func (c *predicateConditional) Table() string { + return c.tableName +} + +// generate returns a list of conditions that match, by _uuid equality, all the objects that +// match the predicate +func (c *predicateConditional) Generate() ([][]ovsdb.Condition, error) { + models, err := c.Matches() + if err != nil { + return nil, err + } + return generateConditionsFromModels(c.cache.DatabaseModel(), models) +} + +// newPredicateConditional 
creates a new predicateConditional +func newPredicateConditional(table string, cache *cache.TableCache, predicate any) (Conditional, error) { + return &predicateConditional{ + tableName: table, + predicate: predicate, + cache: cache, + }, nil +} + +// errorConditional is a conditional that encapsulates an error +// It is used to delay the reporting of errors from conditional creation to API method call +type errorConditional struct { + err error +} + +func (e *errorConditional) Matches() (map[string]model.Model, error) { + return nil, e.err +} + +func (e *errorConditional) Table() string { + return "" +} + +func (e *errorConditional) Generate() ([][]ovsdb.Condition, error) { + return nil, e.err +} + +func newErrorConditional(err error) Conditional { + return &errorConditional{ + err: fmt.Errorf("conditionerror: %s", err.Error()), + } +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/client/config.go b/vendor/github.com/ovn-kubernetes/libovsdb/client/config.go new file mode 100644 index 0000000000..a9c00f56a9 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/client/config.go @@ -0,0 +1,27 @@ +/** + * Copyright (c) 2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + **/ + +package client + +import ( + "crypto/tls" +) + +// Config is a structure used in provisioning a connection to ovsdb. 
+type Config struct { + Addr string + TLSConfig *tls.Config +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/client/doc.go b/vendor/github.com/ovn-kubernetes/libovsdb/client/doc.go new file mode 100644 index 0000000000..2f1aabba6b --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/client/doc.go @@ -0,0 +1,162 @@ +/* +Package client connects to, monitors and interacts with OVSDB servers (RFC7047). + +This package uses structs, that contain the 'ovs' field tag to determine which field goes to +which column in the database. We refer to pointers to this structs as Models. Example: + + type MyLogicalSwitch struct { + UUID string `ovsdb:"_uuid"` // _uuid tag is mandatory + Name string `ovsdb:"name"` + Ports []string `ovsdb:"ports"` + Config map[string]string `ovsdb:"other_config"` + } + +Based on these Models a Database Model (see ClientDBModel type) is built to represent +the entire OVSDB: + + clientDBModel, _ := client.NewClientDBModel("OVN_Northbound", + map[string]client.Model{ + "Logical_Switch": &MyLogicalSwitch{}, + }) + +The ClientDBModel represents the entire Database (or the part of it we're interested in). +Using it, the libovsdb.client package is able to properly encode and decode OVSDB messages +and store them in Model instances. +A client instance is created by simply specifying the connection information and the database model: + + ovs, _ := client.Connect(context.Background(), clientDBModel) + +# Main API + +After creating a OvsdbClient using the Connect() function, we can use a number of CRUD-like +to interact with the database: +List(), Get(), Create(), Update(), Mutate(), Delete(). + +The specific database table that the operation targets is automatically determined based on the type +of the parameter. 
+ +In terms of return values, some of these functions like Create(), Update(), Mutate() and Delete(), +interact with the database so they return list of ovsdb.Operation objects that can be grouped together +and passed to client.Transact(). + +Others, such as List() and Get(), interact with the client's internal cache and are able to +return Model instances (or a list thereof) directly. + +# Conditions + +Some API functions (Create() and Get()), can be run directly. Others, require us to use +a ConditionalAPI. The ConditionalAPI injects RFC7047 Conditions into ovsdb Operations as well as +uses the Conditions to search the internal cache. + +The ConditionalAPI is created using the Where(), WhereCache() and WhereAll() functions. + +Where() accepts a Model (pointer to a struct with ovs tags) and a number of Condition instances. +Conditions must refer to fields of the provided Model (via pointer to fields). Example: + + ls = &MyLogicalSwitch {} + ovs.Where(ls, client.Condition { + Field: &ls.Ports, + Function: ovsdb.ConditionIncludes, + Value: []string{"portUUID"}, + }) + +If no client.Condition is provided, the client will use any of fields that correspond to indexes to +generate an appropriate condition. Therefore the following two statements are equivalent: + + ls = &MyLogicalSwitch {UUID:"myUUID"} + + ovs.Where(ls) + + ovs.Where(ls, client.Condition { + Field: &ls.UUID, + Function: ovsdb.ConditionEqual, + Value: "myUUID"}, + }) + +Where() accepts multiple Condition instances (through variadic arguments). +If provided, the client will generate multiple operations each matching one condition. 
+For example, the following operation will delete all the Logical Switches named "foo" OR "bar": + + ops, err := ovs.Where(ls, + client.Condition { + Field: &ls.Name + Function: ovsdb.ConditionEqual, + Value: "foo", + },client.Condition { + Field: &ls.Port, + Function: ovsdb.ConditionIncludes, + Value: "bar", + }).Delete() + +To create a Condition that matches all of the conditions simultaneously (i.e: AND semantics), use WhereAll(). + +Where() or WhereAll() evaluate the provided index values or explicit conditions against the cache and generate +conditions based on the UUIDs of matching models. If no matches are found in the cache, the generated conditions +will be based on the index or condition fields themselves. + +A more flexible mechanism to search the cache is available: WhereCache() + +WhereCache() accepts a function that takes any Model as argument and returns a boolean. +It is used to search the cache so commonly used with List() function. For example: + + lsList := &[]LogicalSwitch{} + err := ovs.WhereCache( + func(ls *LogicalSwitch) bool { + return strings.HasPrefix(ls.Name, "ext_") + }).List(lsList) + +Server side operations can be executed using WhereCache() conditions but it's not recommended. For each matching +cache element, an operation will be created matching on the "_uuid" column. The number of operations can be +quite large depending on the cache size and the provided function. Most likely there is a way to express the +same condition using Where() or WhereAll() which will be more efficient. + +# Get + +Get() operation is a simple operation capable of retrieving one Model based on some of its schema indexes. E.g: + + ls := &LogicalSwitch{UUID:"myUUID"} + err := ovs.Get(ls) + fmt.Printf("Name of the switch is: &s", ls.Name) + +# List + +List() searches the cache and populates a slice of Models. 
It can be used directly or using WhereCache() + + lsList := &[]LogicalSwitch{} + err := ovs.List(lsList) // List all elements + + err := ovs.WhereCache( + func(ls *LogicalSwitch) bool { + return strings.HasPrefix(ls.Name, "ext_") + }).List(lsList) + +# Create + +Create returns a list of operations to create the models provided. E.g: + + ops, err := ovs.Create(&LogicalSwitch{Name:"foo")}, &LogicalSwitch{Name:"bar"}) + +Update +Update returns a list of operations to update the matching rows to match the values of the provided model. E.g: + + ls := &LogicalSwitch{ExternalIDs: map[string]string {"foo": "bar"}} + ops, err := ovs.Where(...).Update(&ls, &ls.ExternalIDs} + +# Mutate + +Mutate returns a list of operations needed to mutate the matching rows as described by the list of Mutation objects. E.g: + + ls := &LogicalSwitch{} + ops, err := ovs.Where(...).Mutate(&ls, client.Mutation { + Field: &ls.Config, + Mutator: ovsdb.MutateOperationInsert, + Value: map[string]string{"foo":"bar"}, + }) + +# Delete + +Delete returns a list of operations needed to delete the matching rows. 
E.g: + + ops, err := ovs.Where(...).Delete() +*/ +package client diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/client/metrics.go b/vendor/github.com/ovn-kubernetes/libovsdb/client/metrics.go new file mode 100644 index 0000000000..8c4e5f6f2d --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/client/metrics.go @@ -0,0 +1,88 @@ +package client + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +const libovsdbName = "libovsdb" + +type metrics struct { + numUpdates *prometheus.CounterVec + numTableUpdates *prometheus.CounterVec + numDisconnects prometheus.Counter + numMonitors prometheus.Gauge + registerOnce sync.Once +} + +func (m *metrics) init(modelName string, namespace, subsystem string) { + // labels that are the same across all metrics + constLabels := prometheus.Labels{"primary_model": modelName} + + if namespace == "" { + namespace = libovsdbName + subsystem = "" + } + + m.numUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "update_messages_total", + Help: "Count of libovsdb monitor update messages processed, partitioned by database", + ConstLabels: constLabels, + }, + []string{"database"}, + ) + + m.numTableUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "table_updates_total", + Help: "Count of libovsdb monitor update messages per table", + ConstLabels: constLabels, + }, + []string{"database", "table"}, + ) + + m.numDisconnects = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "disconnects_total", + Help: "Count of libovsdb disconnects encountered", + ConstLabels: constLabels, + }, + ) + + m.numMonitors = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "monitors", + Help: "Number of running libovsdb ovsdb monitors", + ConstLabels: constLabels, + }, + ) +} + +func (m 
*metrics) register(r prometheus.Registerer) { + m.registerOnce.Do(func() { + r.MustRegister( + m.numUpdates, + m.numTableUpdates, + m.numDisconnects, + m.numMonitors, + ) + }) +} + +func (o *ovsdbClient) registerMetrics() { + if !o.options.shouldRegisterMetrics || o.options.registry == nil { + return + } + o.metrics.register(o.options.registry) + o.options.shouldRegisterMetrics = false +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/client/monitor.go b/vendor/github.com/ovn-kubernetes/libovsdb/client/monitor.go new file mode 100644 index 0000000000..09fca8f3f0 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/client/monitor.go @@ -0,0 +1,136 @@ +package client + +import ( + "fmt" + "reflect" + + "github.com/google/uuid" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +const emptyUUID = "00000000-0000-0000-0000-000000000000" + +// Monitor represents a monitor +type Monitor struct { + Method string + Tables []TableMonitor + Errors []error + LastTransactionID string +} + +// newMonitor creates a new *Monitor with default values +func newMonitor() *Monitor { + return &Monitor{ + Method: ovsdb.ConditionalMonitorSinceRPC, + Errors: make([]error, 0), + LastTransactionID: emptyUUID, + } +} + +// NewMonitor creates a new Monitor with the provided options +func (o *ovsdbClient) NewMonitor(opts ...MonitorOption) *Monitor { + m := newMonitor() + for _, opt := range opts { + err := opt(o, m) + if err != nil { + m.Errors = append(m.Errors, err) + } + } + return m +} + +// MonitorOption adds Tables to a Monitor +type MonitorOption func(o *ovsdbClient, m *Monitor) error + +// MonitorCookie is the struct we pass to correlate from updates back to their +// originating Monitor request. 
+type MonitorCookie struct { + DatabaseName string `json:"databaseName"` + ID string `json:"id"` +} + +func newMonitorCookie(dbName string) MonitorCookie { + return MonitorCookie{ + DatabaseName: dbName, + ID: uuid.NewString(), + } +} + +// TableMonitor is a table to be monitored +type TableMonitor struct { + // Table is the table to be monitored + Table string + // Conditions are the conditions under which the table should be monitored + Conditions []ovsdb.Condition + // Fields are the fields in the model to monitor + // If none are supplied, all fields will be used + Fields []string +} + +func newTableMonitor(o *ovsdbClient, m model.Model, conditions []model.Condition, fields []any) (*TableMonitor, error) { + dbModel := o.primaryDB().model + tableName := dbModel.FindTable(reflect.TypeOf(m)) + if tableName == "" { + return nil, fmt.Errorf("object of type %s is not part of the ClientDBModel", reflect.TypeOf(m)) + } + + var columns []string + var ovsdbConds []ovsdb.Condition + + if len(fields) == 0 && len(conditions) == 0 { + return &TableMonitor{ + Table: tableName, + Conditions: ovsdbConds, + Fields: columns, + }, nil + } + + data, err := dbModel.NewModelInfo(m) + if err != nil { + return nil, fmt.Errorf("unable to obtain info from model %v: %v", m, err) + } + for _, f := range fields { + column, err := data.ColumnByPtr(f) + if err != nil { + return nil, fmt.Errorf("unable to obtain column from model %v: %v", data, err) + } + columns = append(columns, column) + } + db := o.databases[o.primaryDBName] + mmapper := db.model.Mapper + for _, modelCond := range conditions { + ovsdbCond, err := mmapper.NewCondition(data, modelCond.Field, modelCond.Function, modelCond.Value) + if err != nil { + return nil, fmt.Errorf("unable to convert condition %v: %v", modelCond, err) + } + ovsdbConds = append(ovsdbConds, *ovsdbCond) + } + return &TableMonitor{ + Table: tableName, + Conditions: ovsdbConds, + Fields: columns, + }, nil +} + +func WithTable(m model.Model, fields ...any) 
MonitorOption { + return func(o *ovsdbClient, monitor *Monitor) error { + tableMonitor, err := newTableMonitor(o, m, []model.Condition{}, fields) + if err != nil { + return err + } + monitor.Tables = append(monitor.Tables, *tableMonitor) + return nil + } +} + +func WithConditionalTable(m model.Model, conditions []model.Condition, fields ...any) MonitorOption { + return func(o *ovsdbClient, monitor *Monitor) error { + tableMonitor, err := newTableMonitor(o, m, conditions, fields) + if err != nil { + return err + } + monitor.Tables = append(monitor.Tables, *tableMonitor) + return nil + } +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/client/options.go b/vendor/github.com/ovn-kubernetes/libovsdb/client/options.go new file mode 100644 index 0000000000..93e7d5b874 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/client/options.go @@ -0,0 +1,181 @@ +package client + +import ( + "crypto/tls" + "errors" + "net/url" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/go-logr/logr" + "github.com/prometheus/client_golang/prometheus" +) + +const ( + defaultTCPEndpoint = "tcp:127.0.0.1:6640" + defaultSSLEndpoint = "ssl:127.0.0.1:6640" + defaultUnixEndpoint = "unix:/var/run/openvswitch/ovsdb.sock" +) + +type options struct { + endpoints []string + tlsConfig *tls.Config + reconnect bool + leaderOnly bool + validateModel bool + timeout time.Duration + backoff backoff.BackOff + logger *logr.Logger + registry prometheus.Registerer + shouldRegisterMetrics bool // in case metrics are changed after-the-fact + metricNamespace string // prometheus metric namespace + metricSubsystem string // prometheus metric subsystem + inactivityTimeout time.Duration +} + +type Option func(o *options) error + +func newOptions(opts ...Option) (*options, error) { + o := &options{} + for _, opt := range opts { + if err := opt(o); err != nil { + return nil, err + } + } + // if no endpoints are supplied, use the default unix socket + if len(o.endpoints) == 0 { + o.endpoints = 
[]string{defaultUnixEndpoint} + } + return o, nil +} + +// WithTLSConfig sets the tls.Config for use by the client +func WithTLSConfig(cfg *tls.Config) Option { + return func(o *options) error { + o.tlsConfig = cfg + return nil + } +} + +// WithEndpoint sets the endpoint to be used by the client +// It can be used multiple times, and the first endpoint that +// successfully connects will be used. +// Endpoints are specified in OVSDB Connection Format +// For more details, see the ovsdb(7) man page +func WithEndpoint(endpoint string) Option { + return func(o *options) error { + ep, err := url.Parse(endpoint) + if err != nil { + return err + } + switch ep.Scheme { + case UNIX: + if len(ep.Path) == 0 { + o.endpoints = append(o.endpoints, defaultUnixEndpoint) + return nil + } + case TCP: + if len(ep.Opaque) == 0 { + o.endpoints = append(o.endpoints, defaultTCPEndpoint) + return nil + } + case SSL: + if len(ep.Opaque) == 0 { + o.endpoints = append(o.endpoints, defaultSSLEndpoint) + return nil + } + } + o.endpoints = append(o.endpoints, endpoint) + return nil + } +} + +// WithLeaderOnly tells the client to treat endpoints that are clustered +// and not the leader as down. +func WithLeaderOnly(leaderOnly bool) Option { + return func(o *options) error { + o.leaderOnly = leaderOnly + return nil + } +} + +// WithReconnect tells the client to automatically reconnect when +// disconnected. The timeout is used to construct the context on +// each call to Connect, while backoff dictates the backoff +// algorithm to use. Using WithReconnect implies that +// requested transactions will block until the client has fully reconnected, +// rather than immediately returning an error if there is no connection. 
+func WithReconnect(timeout time.Duration, backoff backoff.BackOff) Option { + return func(o *options) error { + o.reconnect = true + o.timeout = timeout + o.backoff = backoff + return nil + } +} + +// WithInactivityCheck tells the client to send Echo request to ovsdb server periodically +// upon inactivityTimeout. When Echo request fails, then it attempts to reconnect +// with server. The inactivity check is performed as long as the connection is established. +// The reconnectTimeout argument is used to construct the context on each call to Connect, +// while reconnectBackoff dictates the backoff algorithm to use. +func WithInactivityCheck(inactivityTimeout, reconnectTimeout time.Duration, + reconnectBackoff backoff.BackOff) Option { + return func(o *options) error { + if reconnectTimeout >= inactivityTimeout { + return errors.New("inactivity timeout value should be greater than reconnect timeout value") + } + o.reconnect = true + o.timeout = reconnectTimeout + o.backoff = reconnectBackoff + o.inactivityTimeout = inactivityTimeout + return nil + } +} + +// WithLogger allows setting a specific log sink. Otherwise, the default +// go log package is used. +func WithLogger(l *logr.Logger) Option { + return func(o *options) error { + o.logger = l + return nil + } +} + +// WithMetricsRegistry allows the user to specify a Prometheus metrics registry. +// If supplied, the metrics as defined in metrics.go will be registered. +func WithMetricsRegistry(r prometheus.Registerer) Option { + return func(o *options) error { + o.registry = r + o.shouldRegisterMetrics = (r != nil) + return nil + } +} + +// WithMetricsRegistryNamespaceSubsystem allows the user to specify a Prometheus metrics registry +// and Prometheus metric namespace and subsystem of the component utilizing libovsdb. +// If supplied, the metrics as defined in metrics.go will be registered. 
+func WithMetricsRegistryNamespaceSubsystem(r prometheus.Registerer, namespace, subsystem string) Option { + if namespace == "" || subsystem == "" { + panic("libovsdb function WithMetricsRegistryNamespaceSubsystem arguments 'namespace' and 'subsystem' must not be empty") + } + return func(o *options) error { + o.registry = r + o.shouldRegisterMetrics = (r != nil) + o.metricNamespace = namespace + o.metricSubsystem = subsystem + return nil + } +} + +// WithValidateModel allows for client-side tag-based schema validation on API.Create(), API.Mutate() and API.Update() +// including following constraints +// - Integer/Real Ranges: Checks if a number is within the defined minInteger/maxInteger or minReal/maxReal bounds. +// - Length/Size Constraints: Verifies that strings, sets, and maps adhere to minLength and maxLength requirements. +// - Enumerations: Ensures that a value is one of the predefined choices in a schema enum. +func WithValidateModel() Option { + return func(o *options) error { + o.validateModel = true + return nil + } +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/client/validation.go b/vendor/github.com/ovn-kubernetes/libovsdb/client/validation.go new file mode 100644 index 0000000000..be41432e34 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/client/validation.go @@ -0,0 +1,123 @@ +package client + +import ( + "errors" + "fmt" + "reflect" + "strings" + + "github.com/ovn-kubernetes/libovsdb/mapper" + + "github.com/go-playground/validator/v10" + "github.com/ovn-kubernetes/libovsdb/model" +) + +// global validator instance +// Validator is designed to be thread-safe and used as a singleton instance. 
https://pkg.go.dev/github.com/go-playground/validator/v10#hdr-Singleton +var validate *validator.Validate + +func init() { + validate = validator.New(validator.WithRequiredStructEnabled()) + // Register custom validations if needed in the future + // e.g., validate.RegisterValidation("custom_tag", customValidationFunc) +} + +// formatValidationErrors formats validator.ValidationErrors into a detailed human-readable string +func formatValidationErrors(modelName string, context string, validationErrs validator.ValidationErrors) string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("validation error for model %s", modelName)) + + // Append context if provided (e.g., "mutation on column X") + if context != "" { + sb.WriteString(fmt.Sprintf(": %s", context)) + } + + if len(validationErrs) > 0 { + sb.WriteString("; details: [") + var fieldErrorMessages []string + for _, fe := range validationErrs { + targetField := fe.Namespace() // e.g., "Model.Field" or "Model.Nested.Field" + // For validate.Var on simple type, Namespace might be empty. + if targetField == "" { + targetField = fe.Field() // Fallback to field name if any + } + if targetField == "" { // If still empty, use a generic term + targetField = "" + } + + errMsg := fmt.Sprintf("field '%s' (value: '%v') failed on rule '%s'", targetField, fe.Value(), fe.ActualTag()) + if fe.Param() != "" { + errMsg += fmt.Sprintf(" (param: %s)", fe.Param()) + } + fieldErrorMessages = append(fieldErrorMessages, errMsg) + } + sb.WriteString(strings.Join(fieldErrorMessages, ", ")) + sb.WriteString("]") + } + return sb.String() +} + +// validateModel performs validation on a given model struct using its tags. 
+func validateModel(m model.Model) error { + if m == nil { + return fmt.Errorf("model cannot be nil") + } + + // Perform the validation + err := validate.Struct(m) + if err != nil { + modelType := reflect.TypeOf(m).Elem() + modelNameStr := modelType.String() + var validationErrs validator.ValidationErrors + if errors.As(err, &validationErrs) { + formattedErr := formatValidationErrors(modelNameStr, "", validationErrs) + return fmt.Errorf("model validation failed: %s: %w", formattedErr, validationErrs) + } + return fmt.Errorf("error while validating model of type %s: %w", modelNameStr, err) + } + return nil +} + +// validateMutations performs validation on a given slice of mutations. +func validateMutations(model model.Model, info *mapper.Info, mutations ...model.Mutation) error { + modelType := reflect.TypeOf(model).Elem() + modelNameStr := modelType.String() + + for _, mutation := range mutations { + columnName, err := info.ColumnByPtr(mutation.Field) + if err != nil { + return fmt.Errorf("could not get column for mutation field: %w", err) + } + // Find the struct field corresponding to the column name + var structField reflect.StructField + var found bool + for i := 0; i < modelType.NumField(); i++ { + if modelType.Field(i).Tag.Get("ovsdb") == columnName { + structField = modelType.Field(i) + found = true + break + } + } + if !found { + return fmt.Errorf("could not find struct field for column %s", columnName) + } + + // Extract the validate tag + validateTag := structField.Tag.Get("validate") + + // Validate the mutation value if a tag exists + if validateTag != "" { + err = validate.Var(mutation.Value, validateTag) + if err != nil { + var validationErrs validator.ValidationErrors + if errors.As(err, &validationErrs) { + context := fmt.Sprintf("mutation on column %s", columnName) + formattedErr := formatValidationErrors(modelNameStr, context, validationErrs) + return fmt.Errorf("mutation validation failed: %s: %w", formattedErr, validationErrs) + } + return 
fmt.Errorf("error while validating mutation for model of type %s on column %s: %w", modelNameStr, columnName, err) + } + } + } + return nil +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/database/database.go b/vendor/github.com/ovn-kubernetes/libovsdb/database/database.go new file mode 100644 index 0000000000..37246388a4 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/database/database.go @@ -0,0 +1,33 @@ +package database + +import ( + "github.com/google/uuid" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +// Database abstracts a database that a server can use to store and transact data +type Database interface { + CreateDatabase(database string, model ovsdb.DatabaseSchema) error + Exists(database string) bool + NewTransaction(database string) Transaction + Commit(database string, id uuid.UUID, update Update) error + CheckIndexes(database string, table string, m model.Model) error + List(database, table string, conditions ...ovsdb.Condition) (map[string]model.Model, error) + Get(database, table string, uuid string) (model.Model, error) + GetReferences(database, table, row string) (References, error) +} + +// Transaction abstracts a database transaction that can generate database +// updates +type Transaction interface { + Transact(operations ...ovsdb.Operation) ([]*ovsdb.OperationResult, Update) +} + +// Update abstracts an update that can be committed to a database +type Update interface { + GetUpdatedTables() []string + ForEachModelUpdate(table string, do func(uuid string, old, newModel model.Model) error) error + ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error + ForReferenceUpdates(do func(references References) error) error +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/database/doc.go b/vendor/github.com/ovn-kubernetes/libovsdb/database/doc.go new file mode 100644 index 0000000000..c0a858c208 --- /dev/null +++ 
b/vendor/github.com/ovn-kubernetes/libovsdb/database/doc.go @@ -0,0 +1,5 @@ +/* +Package database collects database related types, interfaces and +implementations. +*/ +package database diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/database/references.go b/vendor/github.com/ovn-kubernetes/libovsdb/database/references.go new file mode 100644 index 0000000000..d8181a7a51 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/database/references.go @@ -0,0 +1,71 @@ +package database + +// References tracks the references to rows from other rows at specific +// locations in the schema. +type References map[ReferenceSpec]Reference + +// ReferenceSpec specifies details about where in the schema a reference occurs. +type ReferenceSpec struct { + // ToTable is the table of the row to which the reference is made + ToTable string + + // FromTable is the table of the row from which the reference is made + FromTable string + + // FromColumn is the column of the row from which the reference is made + FromColumn string + + // FromValue flags if the reference is made on a map key or map value when + // the column is a map + FromValue bool +} + +// Reference maps the UUIDs of rows to which the reference is made to the +// rows it is made from +type Reference map[string][]string + +// GetReferences gets references to a row +func (rs References) GetReferences(table, uuid string) References { + refs := References{} + for spec, values := range rs { + if spec.ToTable != table { + continue + } + if _, ok := values[uuid]; ok { + refs[spec] = Reference{uuid: values[uuid]} + } + } + return refs +} + +// UpdateReferences updates the references with the provided ones. Dangling +// references, that is, the references of rows that are no longer referenced +// from anywhere, are cleaned up. 
+func (rs References) UpdateReferences(other References) { + for spec, otherRefs := range other { + for to, from := range otherRefs { + rs.updateReference(spec, to, from) + } + } +} + +// updateReference updates the references to a row at a specific location in the +// schema +func (rs References) updateReference(spec ReferenceSpec, to string, from []string) { + thisRefs, ok := rs[spec] + if !ok && len(from) > 0 { + // add references from a previously untracked location + rs[spec] = Reference{to: from} + return + } + if len(from) > 0 { + // replace references to this row at this specific location + thisRefs[to] = from + return + } + // otherwise remove previously tracked references + delete(thisRefs, to) + if len(thisRefs) == 0 { + delete(rs, spec) + } +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/mapper/info.go b/vendor/github.com/ovn-kubernetes/libovsdb/mapper/info.go new file mode 100644 index 0000000000..7b9a230bcc --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/mapper/info.go @@ -0,0 +1,213 @@ +package mapper + +import ( + "fmt" + "reflect" + "slices" + + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +// ErrColumnNotFound is an error that can occur when the column does not exist for a table +type ErrColumnNotFound struct { + column string + table string +} + +// Error implements the error interface +func (e *ErrColumnNotFound) Error() string { + return fmt.Sprintf("column: %s not found in table: %s", e.column, e.table) +} + +func NewErrColumnNotFound(column, table string) *ErrColumnNotFound { + return &ErrColumnNotFound{ + column: column, + table: table, + } +} + +// Info is a struct that wraps an object with its metadata +type Info struct { + // FieldName indexed by column + Obj any + Metadata Metadata +} + +// Metadata represents the information needed to know how to map OVSDB columns into an objetss fields +type Metadata struct { + Fields map[string]string // Map of ColumnName -> FieldName + TableSchema *ovsdb.TableSchema // 
TableSchema associated + TableName string // Table name +} + +// FieldByColumn returns the field value that corresponds to a column +func (i *Info) FieldByColumn(column string) (any, error) { + fieldName, ok := i.Metadata.Fields[column] + if !ok { + return nil, NewErrColumnNotFound(column, i.Metadata.TableName) + } + return reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName).Interface(), nil +} + +// FieldByColumn returns the field value that corresponds to a column +func (i *Info) hasColumn(column string) bool { + _, ok := i.Metadata.Fields[column] + return ok +} + +// SetField sets the field in the column to the specified value +func (i *Info) SetField(column string, value any) error { + fieldName, ok := i.Metadata.Fields[column] + if !ok { + return fmt.Errorf("SetField: column %s not found in orm info", column) + } + fieldValue := reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName) + + if !fieldValue.Type().AssignableTo(reflect.TypeOf(value)) { + return fmt.Errorf("column %s: native value %v (%s) is not assignable to field %s (%s)", + column, value, reflect.TypeOf(value), fieldName, fieldValue.Type()) + } + fieldValue.Set(reflect.ValueOf(value)) + return nil +} + +// ColumnByPtr returns the column name that corresponds to the field by the field's pointer +func (i *Info) ColumnByPtr(fieldPtr any) (string, error) { + fieldPtrVal := reflect.ValueOf(fieldPtr) + if fieldPtrVal.Kind() != reflect.Ptr { + return "", ovsdb.NewErrWrongType("ColumnByPointer", "pointer to a field in the struct", fieldPtr) + } + offset := fieldPtrVal.Pointer() - reflect.ValueOf(i.Obj).Pointer() + objType := reflect.TypeOf(i.Obj).Elem() + for j := 0; j < objType.NumField(); j++ { + if objType.Field(j).Offset == offset { + column := objType.Field(j).Tag.Get("ovsdb") + if _, ok := i.Metadata.Fields[column]; !ok { + return "", fmt.Errorf("field does not have orm column information") + } + return column, nil + } + } + return "", fmt.Errorf("field pointer does not correspond to orm struct") +} + 
+// getValidIndexes inspects the object and returns the a list of indexes (set of columns) for witch +// the object has non-default values +func (i *Info) getValidIndexes() ([][]string, error) { + var validIndexes [][]string + var possibleIndexes [][]string + + possibleIndexes = append(possibleIndexes, []string{"_uuid"}) + possibleIndexes = append(possibleIndexes, i.Metadata.TableSchema.Indexes...) + + // Iterate through indexes and validate them +OUTER: + for _, idx := range possibleIndexes { + for _, col := range idx { + if !i.hasColumn(col) { + continue OUTER + } + columnSchema := i.Metadata.TableSchema.Column(col) + if columnSchema == nil { + continue OUTER + } + field, err := i.FieldByColumn(col) + if err != nil { + return nil, err + } + if !reflect.ValueOf(field).IsValid() || ovsdb.IsDefaultValue(columnSchema, field) { + continue OUTER + } + } + validIndexes = append(validIndexes, idx) + } + return validIndexes, nil +} + +// NewInfo creates a MapperInfo structure around an object based on a given table schema +func NewInfo(tableName string, table *ovsdb.TableSchema, obj any) (*Info, error) { + objPtrVal := reflect.ValueOf(obj) + if objPtrVal.Type().Kind() != reflect.Ptr { + return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj) + } + objVal := reflect.Indirect(objPtrVal) + if objVal.Kind() != reflect.Struct { + return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj) + } + objType := objVal.Type() + + fields := make(map[string]string, objType.NumField()) + for i := 0; i < objType.NumField(); i++ { + field := objType.Field(i) + colName := field.Tag.Get("ovsdb") + if colName == "" { + // Untagged fields are ignored + continue + } + column := table.Column(colName) + if column == nil { + return nil, &ErrMapper{ + objType: objType.String(), + field: field.Name, + fieldType: field.Type.String(), + fieldTag: colName, + reason: "Column does not exist in schema", + } + } + + // Perform schema-based type checking + expType 
:= ovsdb.NativeType(column) + if expType != field.Type { + return nil, &ErrMapper{ + objType: objType.String(), + field: field.Name, + fieldType: field.Type.String(), + fieldTag: colName, + reason: fmt.Sprintf("Wrong type, column expects %s", expType), + } + } + fields[colName] = field.Name + } + + return &Info{ + Obj: obj, + Metadata: Metadata{ + Fields: fields, + TableSchema: table, + TableName: tableName, + }, + }, nil +} + +func (i *Info) ColumnsByPtr(fields ...any) ([]string, error) { + var columns []string + if len(fields) == 0 { + return nil, nil + } + // Use user-provided field pointers, with validation + columnSet := make(map[string]struct{}, len(fields)) + columns = make([]string, 0, len(fields)) + + for _, field := range fields { + colName, err := i.ColumnByPtr(field) + if err != nil { + return nil, fmt.Errorf("failed to get column name for field pointer: %w", err) + } + if _, ok := columnSet[colName]; !ok { + columns = append(columns, colName) + columnSet[colName] = struct{}{} + } + } + return columns, nil +} + +func (i *Info) ColumnsByPtrWithUUID(fields ...any) ([]string, error) { + columns, err := i.ColumnsByPtr(fields...) + if err != nil { + return nil, err + } + if slices.Contains(columns, "_uuid") { + return columns, nil + } + return append(columns, "_uuid"), nil +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/mapper/mapper.go b/vendor/github.com/ovn-kubernetes/libovsdb/mapper/mapper.go new file mode 100644 index 0000000000..4bf08ccee5 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/mapper/mapper.go @@ -0,0 +1,352 @@ +package mapper + +import ( + "fmt" + "reflect" + + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +// Mapper offers functions to interact with libovsdb through user-provided native structs. +// The way to specify what field of the struct goes +// to what column in the database id through field a field tag. 
+// The tag used is "ovsdb" and has the following structure +// 'ovsdb:"${COLUMN_NAME}"' +// +// where COLUMN_NAME is the name of the column and must match the schema +// +// Example: +// +// type MyObj struct { +// Name string `ovsdb:"name"` +// } +type Mapper struct { + Schema ovsdb.DatabaseSchema +} + +// ErrMapper describes an error in an Mapper type +type ErrMapper struct { + objType string + field string + fieldType string + fieldTag string + reason string +} + +func (e *ErrMapper) Error() string { + return fmt.Sprintf("Mapper Error. Object type %s contains field %s (%s) ovs tag %s: %s", + e.objType, e.field, e.fieldType, e.fieldTag, e.reason) +} + +// NewMapper returns a new mapper +func NewMapper(schema ovsdb.DatabaseSchema) Mapper { + return Mapper{ + Schema: schema, + } +} + +// GetRowData transforms a Row to a struct based on its tags +// The result object must be given as pointer to an object with the right tags +func (m Mapper) GetRowData(row *ovsdb.Row, result *Info) error { + if row == nil { + return nil + } + return m.getRowData(*row, result) +} + +// getRowData transforms a map[string]any containing OvS types (e.g: a ResultRow +// has this format) to orm struct +// The result object must be given as pointer to an object with the right tags +func (m Mapper) getRowData(ovsData ovsdb.Row, result *Info) error { + for name, column := range result.Metadata.TableSchema.Columns { + if !result.hasColumn(name) { + // If provided struct does not have a field to hold this value, skip it + continue + } + + ovsElem, ok := ovsData[name] + if !ok { + // Ignore missing columns + continue + } + + nativeElem, err := ovsdb.OvsToNative(column, ovsElem) + if err != nil { + return fmt.Errorf("table %s, column %s: failed to extract native element: %s", + result.Metadata.TableName, name, err.Error()) + } + + if err := result.SetField(name, nativeElem); err != nil { + return err + } + } + return nil +} + +// GetRowDataWithUUID transforms a Row to a struct based on its tags, 
set uuid if possible +// The result object must be given as pointer to an object with the right tags +func (m Mapper) GetRowDataWithUUID(row *ovsdb.Row, result *Info) error { + if row == nil { + return nil + } + return m.getRowDataWithUUID(*row, result) +} + +// getRowDataWithUUID transforms a map[string]any containing OvS types (e.g: a ResultRow +// has this format) to orm struct, set uuid if possible +// The result object must be given as pointer to an object with the right tags +func (m Mapper) getRowDataWithUUID(ovsData ovsdb.Row, result *Info) error { + if err := m.getRowData(ovsData, result); err != nil { + return err + } + + // Explicitly handle the _uuid column after processing schema columns + if uuidOvsElem, uuidOk := ovsData["_uuid"]; uuidOk { + if uuidInfo, uuidInfoOk := uuidOvsElem.(ovsdb.UUID); uuidInfoOk { + // Check if the target model has a field tagged with "_uuid" + // The check `hasColumn` uses Metadata.Fields which is keyed by column name (tag) + if result.hasColumn("_uuid") { + // Set the field using the string value. SetField should handle it. + if err := result.SetField("_uuid", uuidInfo.GoUUID); err != nil { + return fmt.Errorf("failed to set _uuid field: %w", err) + } + } + } + } + return nil +} + +// NewRow transforms an orm struct to a map[string] any that can be used as libovsdb.Row +// By default, default or null values are skipped. 
This behavior can be modified by specifying +// a list of fields (pointers to fields in the struct) to be added to the row +func (m Mapper) NewRow(data *Info, fields ...any) (ovsdb.Row, error) { + columns := make(map[string]*ovsdb.ColumnSchema) + for k, v := range data.Metadata.TableSchema.Columns { + columns[k] = v + } + columns["_uuid"] = &ovsdb.UUIDColumn + ovsRow := make(map[string]any, len(columns)) + for name, column := range columns { + nativeElem, err := data.FieldByColumn(name) + if err != nil { + // If provided struct does not have a field to hold this value, skip it + continue + } + + // add specific fields + if len(fields) > 0 { + found := false + for _, f := range fields { + col, err := data.ColumnByPtr(f) + if err != nil { + return nil, err + } + if col == name { + found = true + break + } + } + if !found { + continue + } + } + if len(fields) == 0 && ovsdb.IsDefaultValue(column, nativeElem) { + continue + } + ovsElem, err := ovsdb.NativeToOvs(column, nativeElem) + if err != nil { + return nil, fmt.Errorf("table %s, column %s: failed to generate ovs element. %s", data.Metadata.TableName, name, err.Error()) + } + ovsRow[name] = ovsElem + } + return ovsRow, nil +} + +// NewEqualityCondition returns a list of equality conditions that match a given object +// A list of valid columns that shall be used as a index can be provided. +// If none are provided, we will try to use object's field that matches the '_uuid' ovsdb tag +// If it does not exist or is null (""), then we will traverse all of the table indexes and +// use the first index (list of simultaneously unique columns) for which the provided mapper +// object has valid data. The order in which they are traversed matches the order defined +// in the schema. +// By `valid data` we mean non-default data. +func (m Mapper) NewEqualityCondition(data *Info, fields ...any) ([]ovsdb.Condition, error) { + var conditions []ovsdb.Condition + var condIndex [][]string + + // If index is provided, use it. 
If not, obtain the valid indexes from the mapper info + if len(fields) > 0 { + providedIndex := []string{} + for i := range fields { + if col, err := data.ColumnByPtr(fields[i]); err == nil { + providedIndex = append(providedIndex, col) + } else { + return nil, err + } + } + condIndex = append(condIndex, providedIndex) + } else { + var err error + condIndex, err = data.getValidIndexes() + if err != nil { + return nil, err + } + } + + if len(condIndex) == 0 { + return nil, fmt.Errorf("failed to find a valid index") + } + + // Pick the first valid index + for _, col := range condIndex[0] { + field, err := data.FieldByColumn(col) + if err != nil { + return nil, err + } + + column := data.Metadata.TableSchema.Column(col) + if column == nil { + return nil, fmt.Errorf("column %s not found", col) + } + ovsVal, err := ovsdb.NativeToOvs(column, field) + if err != nil { + return nil, err + } + conditions = append(conditions, ovsdb.NewCondition(col, ovsdb.ConditionEqual, ovsVal)) + } + return conditions, nil +} + +// EqualFields compares two mapped objects. +// The indexes to use for comparison are, the _uuid, the table indexes and the columns that correspond +// to the mapped fields pointed to by 'fields'. They must be pointers to fields on the first mapped element (i.e: one) +func (m Mapper) EqualFields(one, other *Info, fields ...any) (bool, error) { + indexes := []string{} + for _, f := range fields { + col, err := one.ColumnByPtr(f) + if err != nil { + return false, err + } + indexes = append(indexes, col) + } + return m.equalIndexes(one, other, indexes...) 
+} + +// NewCondition returns a ovsdb.Condition based on the model +func (m Mapper) NewCondition(data *Info, field any, function ovsdb.ConditionFunction, value any) (*ovsdb.Condition, error) { + column, err := data.ColumnByPtr(field) + if err != nil { + return nil, err + } + + // Check that the condition is valid + columnSchema := data.Metadata.TableSchema.Column(column) + if columnSchema == nil { + return nil, fmt.Errorf("column %s not found", column) + } + if err := ovsdb.ValidateCondition(columnSchema, function, value); err != nil { + return nil, err + } + + ovsValue, err := ovsdb.NativeToOvs(columnSchema, value) + if err != nil { + return nil, err + } + + ovsdbCondition := ovsdb.NewCondition(column, function, ovsValue) + + return &ovsdbCondition, nil + +} + +// NewMutation creates a RFC7047 mutation object based on an ORM object and the mutation fields (in native format) +// It takes care of field validation against the column type +func (m Mapper) NewMutation(data *Info, column string, mutator ovsdb.Mutator, value any) (*ovsdb.Mutation, error) { + // Check the column exists in the object + if !data.hasColumn(column) { + return nil, fmt.Errorf("mutation contains column %s that does not exist in object %v", column, data) + } + // Check that the mutation is valid + columnSchema := data.Metadata.TableSchema.Column(column) + if columnSchema == nil { + return nil, fmt.Errorf("column %s not found", column) + } + if err := ovsdb.ValidateMutation(columnSchema, mutator, value); err != nil { + return nil, err + } + + var ovsValue any + var err error + // Usually a mutation value is of the same type of the value being mutated + // except for delete mutation of maps where it can also be a list of same type of + // keys (rfc7047 5.1). Handle this special case here. 
+ if mutator == "delete" && columnSchema.Type == ovsdb.TypeMap && reflect.TypeOf(value).Kind() != reflect.Map { + // It's OK to cast the value to a list of elements because validation has passed + ovsSet, err := ovsdb.NewOvsSet(value) + if err != nil { + return nil, err + } + ovsValue = ovsSet + } else { + ovsValue, err = ovsdb.NativeToOvs(columnSchema, value) + if err != nil { + return nil, err + } + } + + return &ovsdb.Mutation{Column: column, Mutator: mutator, Value: ovsValue}, nil +} + +// equalIndexes returns whether both models are equal from the DB point of view +// Two objects are considered equal if any of the following conditions is true +// They have a field tagged with column name '_uuid' and their values match +// For any of the indexes defined in the Table Schema, the values all of its columns are simultaneously equal +// (as per RFC7047) +// The values of all of the optional indexes passed as variadic parameter to this function are equal. +func (m Mapper) equalIndexes(one, other *Info, indexes ...string) (bool, error) { + match := false + + oneIndexes, err := one.getValidIndexes() + if err != nil { + return false, err + } + + otherIndexes, err := other.getValidIndexes() + if err != nil { + return false, err + } + + oneIndexes = append(oneIndexes, indexes) + otherIndexes = append(otherIndexes, indexes) + + for _, lidx := range oneIndexes { + for _, ridx := range otherIndexes { + if reflect.DeepEqual(ridx, lidx) { + // All columns in an index must be simultaneously equal + for _, col := range lidx { + if !one.hasColumn(col) || !other.hasColumn(col) { + break + } + lfield, err := one.FieldByColumn(col) + if err != nil { + return false, err + } + rfield, err := other.FieldByColumn(col) + if err != nil { + return false, err + } + if reflect.DeepEqual(lfield, rfield) { + match = true + } else { + match = false + break + } + } + if match { + return true, nil + } + } + } + } + return false, nil +} diff --git 
a/vendor/github.com/ovn-kubernetes/libovsdb/model/client.go b/vendor/github.com/ovn-kubernetes/libovsdb/model/client.go new file mode 100644 index 0000000000..2701bc5829 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/model/client.go @@ -0,0 +1,178 @@ +package model + +import ( + "fmt" + "reflect" + + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +// ColumnKey addresses a column and optionally a key within a column +type ColumnKey struct { + Column string + Key any +} + +// ClientIndex defines a client index by a set of columns +type ClientIndex struct { + Columns []ColumnKey +} + +// ClientDBModel contains the client information needed to build a DatabaseModel +type ClientDBModel struct { + name string + types map[string]reflect.Type + indexes map[string][]ClientIndex +} + +// NewModel returns a new instance of a model from a specific string +func (db ClientDBModel) newModel(table string) (Model, error) { + mtype, ok := db.types[table] + if !ok { + return nil, fmt.Errorf("table %s not found in database model", string(table)) + } + model := reflect.New(mtype.Elem()) + return model.Interface().(Model), nil +} + +// Types returns the ClientDBModel Types +// The ClientDBModel types is a map of reflect.Types indexed by string +// The reflect.Type is a pointer to a struct that contains 'ovs' tags. +func (db ClientDBModel) Types() map[string]reflect.Type { + return db.types +} + +// Name returns the database name +func (db ClientDBModel) Name() string { + return db.name +} + +// Indexes returns the client indexes for a model +func (db ClientDBModel) Indexes(table string) []ClientIndex { + if len(db.indexes) == 0 { + return nil + } + if _, ok := db.indexes[table]; ok { + return copyIndexes(db.indexes)[table] + } + return nil +} + +// SetIndexes sets the client indexes. 
Client indexes are optional, similar to +// schema indexes and are only tracked in the specific client instances that are +// provided with this client model. A client index may point to multiple models +// as uniqueness is not enforced. They are defined per table and multiple +// indexes can be defined for a table. Each index consists of a set of columns. +// If the column is a map, specific keys of that map can be addressed for the +// index. +func (db *ClientDBModel) SetIndexes(indexes map[string][]ClientIndex) { + db.indexes = copyIndexes(indexes) +} + +// Validate validates the DatabaseModel against the input schema +// Returns all the errors detected +func (db ClientDBModel) validate(schema ovsdb.DatabaseSchema) []error { + var errors []error + if db.name != schema.Name { + errors = append(errors, fmt.Errorf("database model name (%s) does not match schema (%s)", + db.name, schema.Name)) + } + + infos := make(map[string]*mapper.Info, len(db.types)) + for tableName := range db.types { + tableSchema := schema.Table(tableName) + if tableSchema == nil { + errors = append(errors, fmt.Errorf("database model contains a model for table %s that does not exist in schema", tableName)) + continue + } + model, err := db.newModel(tableName) + if err != nil { + errors = append(errors, err) + continue + } + info, err := mapper.NewInfo(tableName, tableSchema, model) + if err != nil { + errors = append(errors, err) + continue + } + infos[tableName] = info + } + + for tableName, indexSets := range db.indexes { + info, ok := infos[tableName] + if !ok { + errors = append(errors, fmt.Errorf("database model contains a client index for table %s that does not exist in schema", tableName)) + continue + } + for _, indexSet := range indexSets { + for _, indexColumn := range indexSet.Columns { + f, err := info.FieldByColumn(indexColumn.Column) + if err != nil { + errors = append( + errors, + fmt.Errorf("database model contains a client index for column %s that does not exist in table %s", 
+ indexColumn.Column, + tableName)) + continue + } + if indexColumn.Key != nil && reflect.ValueOf(f).Kind() != reflect.Map { + errors = append( + errors, + fmt.Errorf("database model contains a client index for key %s in column %s of table %s that is not a map", + indexColumn.Key, + indexColumn.Column, + tableName)) + continue + } + } + } + } + return errors +} + +// NewClientDBModel constructs a ClientDBModel based on a database name and dictionary of models indexed by table name +func NewClientDBModel(name string, models map[string]Model) (ClientDBModel, error) { + types := make(map[string]reflect.Type, len(models)) + for table, model := range models { + modelType := reflect.TypeOf(model) + if modelType.Kind() != reflect.Ptr || modelType.Elem().Kind() != reflect.Struct { + return ClientDBModel{}, fmt.Errorf("model is expected to be a pointer to struct") + } + hasUUID := false + for i := 0; i < modelType.Elem().NumField(); i++ { + if field := modelType.Elem().Field(i); field.Tag.Get("ovsdb") == "_uuid" && + field.Type.Kind() == reflect.String { + hasUUID = true + break + } + } + if !hasUUID { + return ClientDBModel{}, fmt.Errorf("model is expected to have a string field called uuid") + } + + types[table] = modelType + } + return ClientDBModel{ + types: types, + name: name, + }, nil +} + +func copyIndexes(src map[string][]ClientIndex) map[string][]ClientIndex { + if len(src) == 0 { + return nil + } + dst := make(map[string][]ClientIndex, len(src)) + for table, indexSets := range src { + dst[table] = make([]ClientIndex, 0, len(indexSets)) + for _, indexSet := range indexSets { + indexSetCopy := ClientIndex{ + Columns: make([]ColumnKey, len(indexSet.Columns)), + } + copy(indexSetCopy.Columns, indexSet.Columns) + dst[table] = append(dst[table], indexSetCopy) + } + } + return dst +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/model/database.go b/vendor/github.com/ovn-kubernetes/libovsdb/model/database.go new file mode 100644 index 0000000000..985ca5a90e --- 
/dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/model/database.go @@ -0,0 +1,118 @@ +package model + +import ( + "fmt" + "reflect" + + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +// A DatabaseModel represents libovsdb's metadata about the database. +// It's the result of combining the client's ClientDBModel and the server's Schema +type DatabaseModel struct { + client ClientDBModel + Schema ovsdb.DatabaseSchema + Mapper mapper.Mapper + metadata map[reflect.Type]mapper.Metadata +} + +// NewDatabaseModel returns a new DatabaseModel +func NewDatabaseModel(schema ovsdb.DatabaseSchema, client ClientDBModel) (DatabaseModel, []error) { + dbModel := &DatabaseModel{ + Schema: schema, + client: client, + } + errs := client.validate(schema) + if len(errs) > 0 { + return DatabaseModel{}, errs + } + dbModel.Mapper = mapper.NewMapper(schema) + var metadata map[reflect.Type]mapper.Metadata + metadata, errs = generateModelInfo(schema, client.types) + if len(errs) > 0 { + return DatabaseModel{}, errs + } + dbModel.metadata = metadata + return *dbModel, nil +} + +// NewPartialDatabaseModel returns a DatabaseModel what does not have a schema yet +func NewPartialDatabaseModel(client ClientDBModel) DatabaseModel { + return DatabaseModel{ + client: client, + } +} + +// Valid returns whether the DatabaseModel is fully functional +func (db DatabaseModel) Valid() bool { + return !reflect.DeepEqual(db.Schema, ovsdb.DatabaseSchema{}) +} + +// Client returns the DatabaseModel's client dbModel +func (db DatabaseModel) Client() ClientDBModel { + return db.client +} + +// NewModel returns a new instance of a model from a specific string +func (db DatabaseModel) NewModel(table string) (Model, error) { + mtype, ok := db.client.types[table] + if !ok { + return nil, fmt.Errorf("table %s not found in database model", string(table)) + } + model := reflect.New(mtype.Elem()) + return model.Interface().(Model), nil +} + +// Types returns the 
DatabaseModel Types +// the DatabaseModel types is a map of reflect.Types indexed by string +// The reflect.Type is a pointer to a struct that contains 'ovs' tags +// as described above. Such pointer to struct also implements the Model interface +func (db DatabaseModel) Types() map[string]reflect.Type { + return db.client.types +} + +// FindTable returns the string associated with a reflect.Type or "" +func (db DatabaseModel) FindTable(mType reflect.Type) string { + for table, tType := range db.client.types { + if tType == mType { + return table + } + } + return "" +} + +// generateModelMetadata creates metadata objects from all models included in the +// database and caches them for future re-use +func generateModelInfo(dbSchema ovsdb.DatabaseSchema, modelTypes map[string]reflect.Type) (map[reflect.Type]mapper.Metadata, []error) { + errors := []error{} + metadata := make(map[reflect.Type]mapper.Metadata, len(modelTypes)) + for tableName, tType := range modelTypes { + tableSchema := dbSchema.Table(tableName) + if tableSchema == nil { + errors = append(errors, fmt.Errorf("database Model contains model for table %s which is not present in schema", tableName)) + continue + } + + obj := reflect.New(tType.Elem()).Interface().(Model) + info, err := mapper.NewInfo(tableName, tableSchema, obj) + if err != nil { + errors = append(errors, err) + continue + } + metadata[tType] = info.Metadata + } + return metadata, errors +} + +// NewModelInfo returns a mapper.Info object based on a provided model +func (db DatabaseModel) NewModelInfo(obj any) (*mapper.Info, error) { + meta, ok := db.metadata[reflect.TypeOf(obj)] + if !ok { + return nil, ovsdb.NewErrWrongType("NewModelInfo", "type that is part of the DatabaseModel", obj) + } + return &mapper.Info{ + Obj: obj, + Metadata: meta, + }, nil +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/model/model.go b/vendor/github.com/ovn-kubernetes/libovsdb/model/model.go new file mode 100644 index 0000000000..ea267ec54a --- 
/dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/model/model.go @@ -0,0 +1,131 @@ +package model + +import ( + "encoding/json" + "fmt" + "reflect" + + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +// A Model is the base interface used to build Database Models. It is used +// to express how data from a specific Database Table shall be translated into structs +// A Model is a struct with at least one (most likely more) field tagged with the 'ovs' tag +// The value of 'ovs' field must be a valid column name in the OVS Database +// A field associated with the "_uuid" column mandatory. The rest of the columns are optional +// The struct may also have non-tagged fields (which will be ignored by the API calls) +// The Model interface must be implemented by the pointer to such type +// Example: +// +// type MyLogicalRouter struct { +// UUID string `ovsdb:"_uuid"` +// Name string `ovsdb:"name"` +// ExternalIDs map[string]string `ovsdb:"external_ids"` +// LoadBalancers []string `ovsdb:"load_balancer"` +// } +type Model any + +type CloneableModel interface { + CloneModel() Model + CloneModelInto(Model) +} + +type ComparableModel interface { + EqualsModel(Model) bool +} + +// Clone creates a deep copy of a model +func Clone(a Model) Model { + if cloner, ok := a.(CloneableModel); ok { + return cloner.CloneModel() + } + + val := reflect.Indirect(reflect.ValueOf(a)) + b := reflect.New(val.Type()).Interface() + aBytes, _ := json.Marshal(a) + _ = json.Unmarshal(aBytes, b) + return b +} + +// CloneInto deep copies a model into another one +func CloneInto(src, dst Model) { + if cloner, ok := src.(CloneableModel); ok { + cloner.CloneModelInto(dst) + return + } + + aBytes, _ := json.Marshal(src) + _ = json.Unmarshal(aBytes, dst) +} + +func Equal(l, r Model) bool { + if comparator, ok := l.(ComparableModel); ok { + return comparator.EqualsModel(r) + } + + return reflect.DeepEqual(l, r) +} + +func modelSetUUID(model Model, uuid string) error { + modelVal := 
reflect.ValueOf(model).Elem() + for i := 0; i < modelVal.NumField(); i++ { + if field := modelVal.Type().Field(i); field.Tag.Get("ovsdb") == "_uuid" && + field.Type.Kind() == reflect.String { + modelVal.Field(i).Set(reflect.ValueOf(uuid)) + return nil + } + } + return fmt.Errorf("model is expected to have a string field mapped to column _uuid") +} + +// Condition is a model-based representation of an OVSDB Condition +type Condition struct { + // Pointer to the field of the model where the operation applies + Field any + // Condition function + Function ovsdb.ConditionFunction + // Value to use in the condition + Value any +} + +// Mutation is a model-based representation of an OVSDB Mutation +type Mutation struct { + // Pointer to the field of the model that shall be mutated + Field any + // String representing the mutator (as per RFC7047) + Mutator ovsdb.Mutator + // Value to use in the mutation + Value any +} + +// CreateModel creates a new Model instance based on an OVSDB Row information +func CreateModel(dbModel DatabaseModel, tableName string, row *ovsdb.Row, uuid string) (Model, error) { + if !dbModel.Valid() { + return nil, fmt.Errorf("database model not valid") + } + + table := dbModel.Schema.Table(tableName) + if table == nil { + return nil, fmt.Errorf("table %s not found", tableName) + } + model, err := dbModel.NewModel(tableName) + if err != nil { + return nil, err + } + info, err := dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + err = dbModel.Mapper.GetRowData(row, info) + if err != nil { + return nil, err + } + + if uuid != "" { + if err := info.SetField("_uuid", uuid); err != nil { + return nil, err + } + } + + return model, nil +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/bindings.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/bindings.go new file mode 100644 index 0000000000..8377f79db3 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/bindings.go @@ -0,0 +1,433 @@ +package ovsdb + 
+import ( + "fmt" + "reflect" +) + +var ( + intType = reflect.TypeOf(0) + realType = reflect.TypeOf(0.0) + boolType = reflect.TypeOf(true) + strType = reflect.TypeOf("") +) + +// ErrWrongType describes typing error +type ErrWrongType struct { + from string + expected string + got any +} + +func (e *ErrWrongType) Error() string { + return fmt.Sprintf("Wrong Type (%s): expected %s but got %+v (%s)", + e.from, e.expected, e.got, reflect.TypeOf(e.got)) +} + +// NewErrWrongType creates a new ErrWrongType +func NewErrWrongType(from, expected string, got any) error { + return &ErrWrongType{ + from: from, + expected: expected, + got: got, + } +} + +// NativeTypeFromAtomic returns the native type that can hold a value of an +// AtomicType +func NativeTypeFromAtomic(basicType string) reflect.Type { + switch basicType { + case TypeInteger: + return intType + case TypeReal: + return realType + case TypeBoolean: + return boolType + case TypeString: + return strType + case TypeUUID: + return strType + default: + panic("Unknown basic type %s basicType") + } +} + +// NativeType returns the reflect.Type that can hold the value of a column +// OVS Type to Native Type convertions: +// +// OVS sets -> go slices or a go native type depending on the key +// OVS uuid -> go strings +// OVS map -> go map +// OVS enum -> go native type depending on the type of the enum key +func NativeType(column *ColumnSchema) reflect.Type { + switch column.Type { + case TypeInteger, TypeReal, TypeBoolean, TypeUUID, TypeString: + return NativeTypeFromAtomic(column.Type) + case TypeEnum: + return NativeTypeFromAtomic(column.TypeObj.Key.Type) + case TypeMap: + keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type) + valueType := NativeTypeFromAtomic(column.TypeObj.Value.Type) + return reflect.MapOf(keyType, valueType) + case TypeSet: + keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type) + // optional type + if column.TypeObj.Min() == 0 && column.TypeObj.Max() == 1 { + return reflect.PointerTo(keyType) 
+ } + // non-optional type with max 1 + if column.TypeObj.Min() == 1 && column.TypeObj.Max() == 1 { + return keyType + } + return reflect.SliceOf(keyType) + default: + panic(fmt.Errorf("unknown extended type %s", column.Type)) + } +} + +// OvsToNativeAtomic returns the native type of the basic ovs type +func OvsToNativeAtomic(basicType string, ovsElem any) (any, error) { + switch basicType { + case TypeReal, TypeString, TypeBoolean: + naType := NativeTypeFromAtomic(basicType) + if reflect.TypeOf(ovsElem) != naType { + return nil, NewErrWrongType("OvsToNativeAtomic", naType.String(), ovsElem) + } + return ovsElem, nil + case TypeInteger: + naType := NativeTypeFromAtomic(basicType) + // Default decoding of numbers is float64, convert them to int + if !reflect.TypeOf(ovsElem).ConvertibleTo(naType) { + return nil, NewErrWrongType("OvsToNativeAtomic", fmt.Sprintf("Convertible to %s", naType), ovsElem) + } + return reflect.ValueOf(ovsElem).Convert(naType).Interface(), nil + case TypeUUID: + uuid, ok := ovsElem.(UUID) + if !ok { + return nil, NewErrWrongType("OvsToNativeAtomic", "UUID", ovsElem) + } + return uuid.GoUUID, nil + default: + panic(fmt.Errorf("unknown atomic type %s", basicType)) + } +} + +func OvsToNativeSlice(baseType string, ovsElem any) (any, error) { + naType := NativeTypeFromAtomic(baseType) + var nativeSet reflect.Value + switch ovsSet := ovsElem.(type) { + case OvsSet: + nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, len(ovsSet.GoSet)) + for _, v := range ovsSet.GoSet { + nv, err := OvsToNativeAtomic(baseType, v) + if err != nil { + return nil, err + } + nativeSet = reflect.Append(nativeSet, reflect.ValueOf(nv)) + } + + default: + nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, 1) + nv, err := OvsToNativeAtomic(baseType, ovsElem) + if err != nil { + return nil, err + } + + nativeSet = reflect.Append(nativeSet, reflect.ValueOf(nv)) + } + return nativeSet.Interface(), nil +} + +// OvsToNative transforms an ovs type to native one 
based on the column type information +func OvsToNative(column *ColumnSchema, ovsElem any) (any, error) { + switch column.Type { + case TypeReal, TypeString, TypeBoolean, TypeInteger, TypeUUID: + return OvsToNativeAtomic(column.Type, ovsElem) + case TypeEnum: + return OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem) + case TypeSet: + naType := NativeType(column) + // The inner slice is []any + // We need to convert it to the real type os slice + switch naType.Kind() { + case reflect.Ptr: + switch ovsSet := ovsElem.(type) { + case OvsSet: + if len(ovsSet.GoSet) > 1 { + return nil, fmt.Errorf("expected a slice of len =< 1, but got a slice with %d elements", len(ovsSet.GoSet)) + } + if len(ovsSet.GoSet) == 0 { + return reflect.Zero(naType).Interface(), nil + } + native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsSet.GoSet[0]) + if err != nil { + return nil, err + } + pv := reflect.New(naType.Elem()) + pv.Elem().Set(reflect.ValueOf(native)) + return pv.Interface(), nil + default: + native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem) + if err != nil { + return nil, err + } + pv := reflect.New(naType.Elem()) + pv.Elem().Set(reflect.ValueOf(native)) + return pv.Interface(), nil + } + case reflect.Slice: + return OvsToNativeSlice(column.TypeObj.Key.Type, ovsElem) + default: + return nil, fmt.Errorf("native type was not slice or pointer. 
got %d", naType.Kind()) + } + case TypeMap: + naType := NativeType(column) + ovsMap, ok := ovsElem.(OvsMap) + if !ok { + return nil, NewErrWrongType("OvsToNative", "OvsMap", ovsElem) + } + // The inner slice is map[interface]any + // We need to convert it to the real type os slice + nativeMap := reflect.MakeMapWithSize(naType, len(ovsMap.GoMap)) + for k, v := range ovsMap.GoMap { + nk, err := OvsToNativeAtomic(column.TypeObj.Key.Type, k) + if err != nil { + return nil, err + } + nv, err := OvsToNativeAtomic(column.TypeObj.Value.Type, v) + if err != nil { + return nil, err + } + nativeMap.SetMapIndex(reflect.ValueOf(nk), reflect.ValueOf(nv)) + } + return nativeMap.Interface(), nil + default: + panic(fmt.Sprintf("Unknown Type: %v", column.Type)) + } +} + +// NativeToOvsAtomic returns the OVS type of the atomic native value +func NativeToOvsAtomic(basicType string, nativeElem any) (any, error) { + naType := NativeTypeFromAtomic(basicType) + if reflect.TypeOf(nativeElem) != naType { + return nil, NewErrWrongType("NativeToOvsAtomic", naType.String(), nativeElem) + } + switch basicType { + case TypeUUID: + return UUID{GoUUID: nativeElem.(string)}, nil + default: + return nativeElem, nil + } +} + +// NativeToOvs transforms an native type to a ovs type based on the column type information +func NativeToOvs(column *ColumnSchema, rawElem any) (any, error) { + naType := NativeType(column) + if t := reflect.TypeOf(rawElem); t != naType { + return nil, NewErrWrongType("NativeToOvs", naType.String(), rawElem) + } + + switch column.Type { + case TypeInteger, TypeReal, TypeString, TypeBoolean: + return rawElem, nil + case TypeEnum: + // Enums containing UUIDs should fall through to the UUID case below + if column.TypeObj.Key.Type != TypeUUID { + return rawElem, nil + } + fallthrough + case TypeUUID: + return UUID{GoUUID: rawElem.(string)}, nil + case TypeSet: + var ovsSet OvsSet + if column.TypeObj.Key.Type == TypeUUID { + ovsSlice := []any{} + if _, ok := rawElem.([]string); ok { 
+ for _, v := range rawElem.([]string) { + uuid := UUID{GoUUID: v} + ovsSlice = append(ovsSlice, uuid) + } + } else if _, ok := rawElem.(*string); ok { + v := rawElem.(*string) + if v != nil { + uuid := UUID{GoUUID: *v} + ovsSlice = append(ovsSlice, uuid) + } + } else { + return nil, fmt.Errorf("uuid slice was neither []string or *string") + } + ovsSet = OvsSet{GoSet: ovsSlice} + + } else { + var err error + ovsSet, err = NewOvsSet(rawElem) + if err != nil { + return nil, err + } + } + return ovsSet, nil + case TypeMap: + nativeMapVal := reflect.ValueOf(rawElem) + ovsMap := make(map[any]any, nativeMapVal.Len()) + for _, key := range nativeMapVal.MapKeys() { + ovsKey, err := NativeToOvsAtomic(column.TypeObj.Key.Type, key.Interface()) + if err != nil { + return nil, err + } + ovsVal, err := NativeToOvsAtomic(column.TypeObj.Value.Type, nativeMapVal.MapIndex(key).Interface()) + if err != nil { + return nil, err + } + ovsMap[ovsKey] = ovsVal + } + return OvsMap{GoMap: ovsMap}, nil + + default: + panic(fmt.Sprintf("Unknown Type: %v", column.Type)) + } +} + +// IsDefaultValue checks if a provided native element corresponds to the default value of its +// designated column type +func IsDefaultValue(column *ColumnSchema, nativeElem any) bool { + switch column.Type { + case TypeEnum: + return isDefaultBaseValue(nativeElem, column.TypeObj.Key.Type) + default: + return isDefaultBaseValue(nativeElem, column.Type) + } +} + +// ValidateMutationAtomic checks if the mutation is valid for a specific AtomicType +func validateMutationAtomic(atype string, mutator Mutator, value any) error { + nType := NativeTypeFromAtomic(atype) + if reflect.TypeOf(value) != nType { + return NewErrWrongType(fmt.Sprintf("Mutation of atomic type %s", atype), nType.String(), value) + } + + switch atype { + case TypeUUID, TypeString, TypeBoolean: + return fmt.Errorf("atomictype %s does not support mutation", atype) + case TypeReal: + switch mutator { + case MutateOperationAdd, MutateOperationSubtract, 
MutateOperationMultiply, MutateOperationDivide: + return nil + default: + return fmt.Errorf("wrong mutator for real type %s", mutator) + } + case TypeInteger: + switch mutator { + case MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo: + return nil + default: + return fmt.Errorf("wrong mutator for integer type: %s", mutator) + } + default: + panic("Unsupported Atomic Type") + } +} + +// ValidateMutation checks if the mutation value and mutator string area appropriate +// for a given column based on the rules specified RFC7047 +func ValidateMutation(column *ColumnSchema, mutator Mutator, value any) error { + if !column.Mutable() { + return fmt.Errorf("column is not mutable") + } + switch column.Type { + case TypeSet: + switch mutator { + case MutateOperationInsert, MutateOperationDelete: + // RFC7047 says a may be an with a single + // element. Check if we can store this value in our column + if reflect.TypeOf(value).Kind() != reflect.Slice { + if NativeType(column) != reflect.SliceOf(reflect.TypeOf(value)) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of single value in to column %s", mutator, column), + NativeType(column).String(), reflect.SliceOf(reflect.TypeOf(value)).String()) + } + return nil + } + if NativeType(column) != reflect.TypeOf(value) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), + NativeType(column).String(), value) + } + return nil + default: + return validateMutationAtomic(column.TypeObj.Key.Type, mutator, value) + } + case TypeMap: + switch mutator { + case MutateOperationInsert: + // Value must be a map of the same kind + if reflect.TypeOf(value) != NativeType(column) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), + NativeType(column).String(), value) + } + return nil + case MutateOperationDelete: + // Value must be a map of the same kind or a set of keys to delete + if reflect.TypeOf(value) != 
NativeType(column) && + reflect.TypeOf(value) != reflect.SliceOf(NativeTypeFromAtomic(column.TypeObj.Key.Type)) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), + "compatible map type", value) + } + return nil + + default: + return fmt.Errorf("wrong mutator for map type: %s", mutator) + } + case TypeEnum: + // RFC does not clarify what to do with enums. + return fmt.Errorf("enums do not support mutation") + default: + return validateMutationAtomic(column.Type, mutator, value) + } +} + +func ValidateCondition(column *ColumnSchema, function ConditionFunction, nativeValue any) error { + if NativeType(column) != reflect.TypeOf(nativeValue) { + return NewErrWrongType(fmt.Sprintf("Condition for column %s", column), + NativeType(column).String(), nativeValue) + } + + switch column.Type { + case TypeSet, TypeMap, TypeBoolean, TypeString, TypeUUID, TypeEnum: + switch function { + case ConditionEqual, ConditionNotEqual, ConditionIncludes, ConditionExcludes: + return nil + default: + return fmt.Errorf("wrong condition function %s for type: %s", function, column.Type) + } + case TypeInteger, TypeReal: + // All functions are valid + return nil + default: + panic("Unsupported Type") + } +} + +func isDefaultBaseValue(elem any, etype ExtendedType) bool { + value := reflect.ValueOf(elem) + if !value.IsValid() { + return true + } + if reflect.TypeOf(elem).Kind() == reflect.Ptr { + return reflect.ValueOf(elem).IsZero() + } + switch etype { + case TypeUUID: + return elem.(string) == "00000000-0000-0000-0000-000000000000" || elem.(string) == "" + case TypeMap, TypeSet: + if value.Kind() == reflect.Array { + return value.Len() == 0 + } + return value.IsNil() || value.Len() == 0 + case TypeString: + return elem.(string) == "" + case TypeInteger: + return elem.(int) == 0 + case TypeReal: + return elem.(float64) == 0 + default: + return false + } +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/condition.go 
b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/condition.go new file mode 100644 index 0000000000..326e695548 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/condition.go @@ -0,0 +1,223 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "reflect" +) + +type ConditionFunction string +type WaitCondition string + +const ( + // ConditionLessThan is the less than condition + ConditionLessThan ConditionFunction = "<" + // ConditionLessThanOrEqual is the less than or equal condition + ConditionLessThanOrEqual ConditionFunction = "<=" + // ConditionEqual is the equal condition + ConditionEqual ConditionFunction = "==" + // ConditionNotEqual is the not equal condition + ConditionNotEqual ConditionFunction = "!=" + // ConditionGreaterThan is the greater than condition + ConditionGreaterThan ConditionFunction = ">" + // ConditionGreaterThanOrEqual is the greater than or equal condition + ConditionGreaterThanOrEqual ConditionFunction = ">=" + // ConditionIncludes is the includes condition + ConditionIncludes ConditionFunction = "includes" + // ConditionExcludes is the excludes condition + ConditionExcludes ConditionFunction = "excludes" + + // WaitConditionEqual is the equal condition + WaitConditionEqual WaitCondition = "==" + // WaitConditionNotEqual is the not equal condition + WaitConditionNotEqual WaitCondition = "!=" +) + +// Condition is described in RFC 7047: 5.1 +type Condition struct { + Column string + Function ConditionFunction + Value any +} + +func (c Condition) String() string { + return fmt.Sprintf("where column %s %s %v", c.Column, c.Function, c.Value) +} + +// NewCondition returns a new condition +func NewCondition(column string, function ConditionFunction, value any) Condition { + return Condition{ + Column: column, + Function: function, + Value: value, + } +} + +// MarshalJSON marshals a condition to a 3 element JSON array +func (c Condition) MarshalJSON() ([]byte, error) { + v := []any{c.Column, c.Function, c.Value} + return 
json.Marshal(v) +} + +// UnmarshalJSON converts a 3 element JSON array to a Condition +func (c *Condition) UnmarshalJSON(b []byte) error { + var v []any + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + if len(v) != 3 { + return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v)) + } + c.Column = v[0].(string) + function := ConditionFunction(v[1].(string)) + switch function { + case ConditionEqual, + ConditionNotEqual, + ConditionIncludes, + ConditionExcludes, + ConditionGreaterThan, + ConditionGreaterThanOrEqual, + ConditionLessThan, + ConditionLessThanOrEqual: + c.Function = function + default: + return fmt.Errorf("%s is not a valid function", function) + } + vv, err := ovsSliceToGoNotation(v[2]) + if err != nil { + return err + } + c.Value = vv + return nil +} + +// Evaluate will evaluate the condition on the two provided values +// The conditions operately differently depending on the type of +// the provided values. The behavior is as described in RFC7047 +func (c ConditionFunction) Evaluate(a any, b any) (bool, error) { + x := reflect.ValueOf(a) + y := reflect.ValueOf(b) + if x.Kind() != y.Kind() { + return false, fmt.Errorf("comparison between %s and %s not supported", x.Kind(), y.Kind()) + } + switch c { + case ConditionEqual: + return reflect.DeepEqual(a, b), nil + case ConditionNotEqual: + return !reflect.DeepEqual(a, b), nil + case ConditionIncludes: + switch x.Kind() { + case reflect.Slice: + return sliceContains(x, y), nil + case reflect.Map: + return mapContains(x, y), nil + case reflect.Int, reflect.Float64, reflect.Bool, reflect.String: + return reflect.DeepEqual(a, b), nil + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionExcludes: + switch x.Kind() { + case reflect.Slice: + return !sliceContains(x, y), nil + case reflect.Map: + return !mapContains(x, y), nil + case reflect.Int, reflect.Float64, reflect.Bool, reflect.String: + return !reflect.DeepEqual(a, 
b), nil + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionGreaterThan: + switch x.Kind() { + case reflect.Int: + return x.Int() > y.Int(), nil + case reflect.Float64: + return x.Float() > y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionGreaterThanOrEqual: + switch x.Kind() { + case reflect.Int: + return x.Int() >= y.Int(), nil + case reflect.Float64: + return x.Float() >= y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionLessThan: + switch x.Kind() { + case reflect.Int: + return x.Int() < y.Int(), nil + case reflect.Float64: + return x.Float() < y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionLessThanOrEqual: + switch x.Kind() { + case reflect.Int: + return x.Int() <= y.Int(), nil + case reflect.Float64: + return x.Float() <= y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + default: + return false, fmt.Errorf("unsupported condition function %s", c) + } + // we should never get here + return false, fmt.Errorf("unreachable condition") +} + +func sliceContains(x, y reflect.Value) bool { + for i := 0; i < y.Len(); i++ { + found := false + vy := y.Index(i) + for j := 0; j < x.Len(); j++ { + vx := x.Index(j) + if vy.Kind() == reflect.Interface { + if vy.Elem() == vx.Elem() { + found = true + break + } + } else { + if vy.Interface() == vx.Interface() { + found = true + break + } + } + } + if !found { + return false + } + } + return true +} + +func mapContains(x, y reflect.Value) bool { + iter := y.MapRange() + for 
iter.Next() { + k := iter.Key() + v := iter.Value() + vx := x.MapIndex(k) + if !vx.IsValid() { + return false + } + if v.Kind() != reflect.Interface { + if v.Interface() != vx.Interface() { + return false + } + } else { + if v.Elem() != vx.Elem() { + return false + } + } + } + return true +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/error.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/error.go new file mode 100644 index 0000000000..4a85c541ce --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/error.go @@ -0,0 +1,373 @@ +package ovsdb + +import "fmt" + +const ( + referentialIntegrityViolation = "referential integrity violation" + constraintViolation = "constraint violation" + resourcesExhausted = "resources exhausted" + ioError = "I/O error" + duplicateUUIDName = "duplicate uuid name" + domainError = "domain error" + rangeError = "range error" + timedOut = "timed out" + notSupported = "not supported" + aborted = "aborted" + notOwner = "not owner" +) + +// errorFromResult returns an specific OVSDB error type from +// an OperationResult +func errorFromResult(op *Operation, r OperationResult) OperationError { + if r.Error == "" { + return nil + } + switch r.Error { + case referentialIntegrityViolation: + return &ReferentialIntegrityViolation{r.Details, op} + case constraintViolation: + return &ConstraintViolation{r.Details, op} + case resourcesExhausted: + return &ResourcesExhausted{r.Details, op} + case ioError: + return &IOError{r.Details, op} + case duplicateUUIDName: + return &DuplicateUUIDName{r.Details, op} + case domainError: + return &DomainError{r.Details, op} + case rangeError: + return &RangeError{r.Details, op} + case timedOut: + return &TimedOut{r.Details, op} + case notSupported: + return &NotSupported{r.Details, op} + case aborted: + return &Aborted{r.Details, op} + case notOwner: + return &NotOwner{r.Details, op} + default: + return &Error{r.Error, r.Details, op} + } +} + +func ResultFromError(err error) 
OperationResult { + if err == nil { + panic("Program error: passed nil error to resultFromError") + } + switch e := err.(type) { + case *ReferentialIntegrityViolation: + return OperationResult{Error: referentialIntegrityViolation, Details: e.details} + case *ConstraintViolation: + return OperationResult{Error: constraintViolation, Details: e.details} + case *ResourcesExhausted: + return OperationResult{Error: resourcesExhausted, Details: e.details} + case *IOError: + return OperationResult{Error: ioError, Details: e.details} + case *DuplicateUUIDName: + return OperationResult{Error: duplicateUUIDName, Details: e.details} + case *DomainError: + return OperationResult{Error: domainError, Details: e.details} + case *RangeError: + return OperationResult{Error: rangeError, Details: e.details} + case *TimedOut: + return OperationResult{Error: timedOut, Details: e.details} + case *NotSupported: + return OperationResult{Error: notSupported, Details: e.details} + case *Aborted: + return OperationResult{Error: aborted, Details: e.details} + case *NotOwner: + return OperationResult{Error: notOwner, Details: e.details} + default: + return OperationResult{Error: e.Error()} + } +} + +// CheckOperationResults checks whether the provided operation was a success +// If the operation was a success, it will return nil, nil +// If the operation failed, due to a error committing the transaction it will +// return nil, error. +// Finally, in the case where one or more of the operations in the transaction +// failed, we return []OperationErrors, error +// Within []OperationErrors, the OperationErrors.Index() corresponds to the same index in +// the original Operations struct. 
You may also perform type assertions against +// the error so the caller can decide how best to handle it +func CheckOperationResults(result []OperationResult, ops []Operation) ([]OperationError, error) { + // this shouldn't happen, but we'll cover the case to be certain + if len(result) < len(ops) { + return nil, fmt.Errorf("ovsdb transaction error. %d operations submitted but only %d results received", len(ops), len(result)) + } + var errs []OperationError + for i, op := range result { + // RFC 7047: if all of the operations succeed, but the results cannot + // be committed, then "result" will have one more element than "params", + // with the additional element being an . + if i >= len(ops) { + return errs, errorFromResult(nil, op) + } + if err := errorFromResult(&ops[i], op); err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return errs, fmt.Errorf("%d ovsdb operations failed", len(errs)) + } + return nil, nil +} + +// OperationError represents an error that occurred as part of an +// OVSDB Operation +type OperationError interface { + error + // Operation is a pointer to the operation which caused the error + Operation() *Operation +} + +// ReferentialIntegrityViolation is explained in RFC 7047 4.1.3 +type ReferentialIntegrityViolation struct { + details string + operation *Operation +} + +func NewReferentialIntegrityViolation(details string) *ReferentialIntegrityViolation { + return &ReferentialIntegrityViolation{details: details} +} + +// Error implements the error interface +func (e *ReferentialIntegrityViolation) Error() string { + msg := referentialIntegrityViolation + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *ReferentialIntegrityViolation) Operation() *Operation { + return e.operation +} + +// ConstraintViolation is described in RFC 7047: 4.1.3 +type ConstraintViolation struct { + details string + operation *Operation +} + +func 
NewConstraintViolation(details string) *ConstraintViolation { + return &ConstraintViolation{details: details} +} + +// Error implements the error interface +func (e *ConstraintViolation) Error() string { + msg := constraintViolation + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *ConstraintViolation) Operation() *Operation { + return e.operation +} + +// ResourcesExhausted is described in RFC 7047: 4.1.3 +type ResourcesExhausted struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *ResourcesExhausted) Error() string { + msg := resourcesExhausted + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *ResourcesExhausted) Operation() *Operation { + return e.operation +} + +// IOError is described in RFC7047: 4.1.3 +type IOError struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *IOError) Error() string { + msg := ioError + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *IOError) Operation() *Operation { + return e.operation +} + +// DuplicateUUIDName is described in RFC7047 5.2.1 +type DuplicateUUIDName struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *DuplicateUUIDName) Error() string { + msg := duplicateUUIDName + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *DuplicateUUIDName) Operation() *Operation { + return e.operation +} + +// DomainError is described in RFC 7047: 5.2.4 +type DomainError struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *DomainError) Error() string { + msg := domainError + if e.details != "" { + msg += 
": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *DomainError) Operation() *Operation { + return e.operation +} + +// RangeError is described in RFC 7047: 5.2.4 +type RangeError struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *RangeError) Error() string { + msg := rangeError + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *RangeError) Operation() *Operation { + return e.operation +} + +// TimedOut is described in RFC 7047: 5.2.6 +type TimedOut struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *TimedOut) Error() string { + msg := timedOut + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *TimedOut) Operation() *Operation { + return e.operation +} + +// NotSupported is described in RFC 7047: 5.2.7 +type NotSupported struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *NotSupported) Error() string { + msg := notSupported + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *NotSupported) Operation() *Operation { + return e.operation +} + +// Aborted is described in RFC 7047: 5.2.8 +type Aborted struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *Aborted) Error() string { + msg := aborted + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *Aborted) Operation() *Operation { + return e.operation +} + +// NotOwner is described in RFC 7047: 5.2.9 +type NotOwner struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *NotOwner) Error() 
string { + msg := notOwner + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *NotOwner) Operation() *Operation { + return e.operation +} + +// Error is a generic OVSDB Error type that implements the +// OperationError and error interfaces +type Error struct { + name string + details string + operation *Operation +} + +// Error implements the error interface +func (e *Error) Error() string { + msg := e.name + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *Error) Operation() *Operation { + return e.operation +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/map.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/map.go new file mode 100644 index 0000000000..a80bfa16fc --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/map.go @@ -0,0 +1,92 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// OvsMap is the JSON map structure used for OVSDB +// RFC 7047 uses the following notation for map as JSON doesn't support non-string keys for maps. +// A 2-element JSON array that represents a database map value. The +// first element of the array must be the string "map", and the +// second element must be an array of zero or more s giving the +// values in the map. All of the s must have the same key and +// value types. 
+type OvsMap struct { + GoMap map[any]any +} + +// MarshalJSON marshalls an OVSDB style Map to a byte array +func (o OvsMap) MarshalJSON() ([]byte, error) { + if len(o.GoMap) > 0 { + var ovsMap, innerMap []any + ovsMap = append(ovsMap, "map") + for key, val := range o.GoMap { + var mapSeg []any + mapSeg = append(mapSeg, key) + mapSeg = append(mapSeg, val) + innerMap = append(innerMap, mapSeg) + } + ovsMap = append(ovsMap, innerMap) + return json.Marshal(ovsMap) + } + return []byte("[\"map\",[]]"), nil +} + +// UnmarshalJSON unmarshals an OVSDB style Map from a byte array +func (o *OvsMap) UnmarshalJSON(b []byte) (err error) { + var oMap []any + o.GoMap = make(map[any]any) + // Assign to the named return ('=', not ':='): a short declaration here would + // shadow 'err' and silently swallow JSON syntax errors, returning nil on bad input. + if err = json.Unmarshal(b, &oMap); err == nil && len(oMap) > 1 { + innerSlice := oMap[1].([]any) + for _, val := range innerSlice { + f := val.([]any) + var k any + switch f[0].(type) { + case []any: + vSet := f[0].([]any) + if len(vSet) != 2 || vSet[0] == "map" { + return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)} + } + goSlice, err := ovsSliceToGoNotation(vSet) + if err != nil { + return err + } + k = goSlice + default: + k = f[0] + } + switch f[1].(type) { + case []any: + vSet := f[1].([]any) + if len(vSet) != 2 || vSet[0] == "map" { + return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)} + } + goSlice, err := ovsSliceToGoNotation(vSet) + if err != nil { + return err + } + o.GoMap[k] = goSlice + default: + o.GoMap[k] = f[1] + } + } + } + return err +} + +// NewOvsMap will return an OVSDB style map from a provided Golang Map +func NewOvsMap(goMap any) (OvsMap, error) { + v := reflect.ValueOf(goMap) + if v.Kind() != reflect.Map { + return OvsMap{}, fmt.Errorf("ovsmap supports only go map types") + } + + genMap := make(map[any]any) + keys := v.MapKeys() + for _, key := range keys { + genMap[key.Interface()] = v.MapIndex(key).Interface() + } + return OvsMap{genMap}, nil +} diff --git
a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/monitor_select.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/monitor_select.go new file mode 100644 index 0000000000..58e57f7192 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/monitor_select.go @@ -0,0 +1,88 @@ +package ovsdb + +import "encoding/json" + +// MonitorSelect represents a monitor select according to RFC7047 +type MonitorSelect struct { + initial *bool + insert *bool + delete *bool + modify *bool +} + +// NewMonitorSelect returns a new MonitorSelect with the provided values +func NewMonitorSelect(withInitialState, withInserts, withDeletes, withModifies bool) *MonitorSelect { + return &MonitorSelect{ + initial: &withInitialState, + insert: &withInserts, + delete: &withDeletes, + modify: &withModifies, + } +} + +// NewDefaultMonitorSelect returns a new MonitorSelect with default values +func NewDefaultMonitorSelect() *MonitorSelect { + return NewMonitorSelect(true, true, true, true) +} + +// Initial returns whether or not an initial response will be sent +func (m MonitorSelect) Initial() bool { + if m.initial == nil { + return true + } + return *m.initial +} + +// Insert returns whether we will receive updates for inserts +func (m MonitorSelect) Insert() bool { + if m.insert == nil { + return true + } + return *m.insert +} + +// Delete returns whether we will receive updates for deletions +func (m MonitorSelect) Delete() bool { + if m.delete == nil { + return true + } + return *m.delete +} + +// Modify returns whether we will receive updates for modifications +func (m MonitorSelect) Modify() bool { + if m.modify == nil { + return true + } + return *m.modify +} + +type monitorSelect struct { + Initial *bool `json:"initial,omitempty"` + Insert *bool `json:"insert,omitempty"` + Delete *bool `json:"delete,omitempty"` + Modify *bool `json:"modify,omitempty"` +} + +func (m MonitorSelect) MarshalJSON() ([]byte, error) { + ms := monitorSelect{ + Initial: m.initial, + Insert: m.insert, + 
Delete: m.delete, + Modify: m.modify, + } + return json.Marshal(ms) +} + +func (m *MonitorSelect) UnmarshalJSON(data []byte) error { + var ms monitorSelect + err := json.Unmarshal(data, &ms) + if err != nil { + return err + } + m.initial = ms.Initial + m.insert = ms.Insert + m.delete = ms.Delete + m.modify = ms.Modify + return nil +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/mutation.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/mutation.go new file mode 100644 index 0000000000..66f8f047ac --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/mutation.go @@ -0,0 +1,87 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" +) + +type Mutator string + +const ( + // MutateOperationDelete is the delete mutator + MutateOperationDelete Mutator = "delete" + // MutateOperationInsert is the insert mutator + MutateOperationInsert Mutator = "insert" + // MutateOperationAdd is the add mutator + MutateOperationAdd Mutator = "+=" + // MutateOperationSubtract is the subtract mutator + MutateOperationSubtract Mutator = "-=" + // MutateOperationMultiply is the multiply mutator + MutateOperationMultiply Mutator = "*=" + // MutateOperationDivide is the divide mutator + MutateOperationDivide Mutator = "/=" + // MutateOperationModulo is the modulo mutator + MutateOperationModulo Mutator = "%=" +) + +// Mutation is described in RFC 7047: 5.1 +type Mutation struct { + Column string + Mutator Mutator + Value any +} + +// NewMutation returns a new mutation +func NewMutation(column string, mutator Mutator, value any) *Mutation { + return &Mutation{ + Column: column, + Mutator: mutator, + Value: value, + } +} + +// MarshalJSON marshals a mutation to a 3 element JSON array +func (m Mutation) MarshalJSON() ([]byte, error) { + v := []any{m.Column, m.Mutator, m.Value} + return json.Marshal(v) +} + +// UnmarshalJSON converts a 3 element JSON array to a Mutation +func (m *Mutation) UnmarshalJSON(b []byte) error { + var v []any + err := json.Unmarshal(b, &v) 
+ if err != nil { + return err + } + if len(v) != 3 { + return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v)) + } + ok := false + m.Column, ok = v[0].(string) + if !ok { + return fmt.Errorf("expected column name %v to be a valid string", v[0]) + } + mutatorString, ok := v[1].(string) + if !ok { + return fmt.Errorf("expected mutator %v to be a valid string", v[1]) + } + mutator := Mutator(mutatorString) + switch mutator { + case MutateOperationDelete, + MutateOperationInsert, + MutateOperationAdd, + MutateOperationSubtract, + MutateOperationMultiply, + MutateOperationDivide, + MutateOperationModulo: + m.Mutator = mutator + default: + return fmt.Errorf("%s is not a valid mutator", mutator) + } + vv, err := ovsSliceToGoNotation(v[2]) + if err != nil { + return err + } + m.Value = vv + return nil +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/named_uuid.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/named_uuid.go new file mode 100644 index 0000000000..f1b5956514 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/named_uuid.go @@ -0,0 +1,165 @@ +package ovsdb + +import ( + "fmt" +) + +// ExpandNamedUUIDs replaces named UUIDs in columns that contain UUID types +// throughout the operation. The caller must ensure each input operation has +// a valid UUID, which may be replaced if a previous operation created a +// matching named UUID mapping. Returns the updated operations or an error. 
+func ExpandNamedUUIDs(ops []Operation, schema *DatabaseSchema) ([]Operation, error) { + uuidMap := make(map[string]string) + + // Pass 1: replace the named UUID with a real UUID for each operation and + // build the substitution map + for i := range ops { + op := &ops[i] + if op.Op != OperationInsert { + // Only Insert operations can specify a Named UUID + continue + } + + if err := ValidateUUID(op.UUID); err != nil { + return nil, fmt.Errorf("operation UUID %q invalid: %v", op.UUID, err) + } + + if op.UUIDName != "" { + if uuid, ok := uuidMap[op.UUIDName]; ok { + if op.UUID != "" && op.UUID != uuid { + return nil, fmt.Errorf("named UUID %q maps to UUID %q but found existing UUID %q", + op.UUIDName, uuid, op.UUID) + } + // If there's already a mapping for this named UUID use it + op.UUID = uuid + } else { + uuidMap[op.UUIDName] = op.UUID + } + op.UUIDName = "" + } + } + + // Pass 2: replace named UUIDs in operation fields with the real UUID + for i := range ops { + op := &ops[i] + tableSchema := schema.Table(op.Table) + if tableSchema == nil { + return nil, fmt.Errorf("table %q not found in schema %q", op.Table, schema.Name) + } + + for i, condition := range op.Where { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, condition.Column, condition.Value, uuidMap) + if err != nil { + return nil, err + } + op.Where[i].Value = newVal + } + for i, mutation := range op.Mutations { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, mutation.Column, mutation.Value, uuidMap) + if err != nil { + return nil, err + } + op.Mutations[i].Value = newVal + } + for _, row := range op.Rows { + for k, v := range row { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap) + if err != nil { + return nil, err + } + row[k] = newVal + } + } + for k, v := range op.Row { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap) + if err != nil { + return nil, err + } + op.Row[k] = newVal + } + } + + return ops, nil +} + 
+func expandColumnNamedUUIDs(tableSchema *TableSchema, tableName, columnName string, value any, uuidMap map[string]string) (any, error) { + column := tableSchema.Column(columnName) + if column == nil { + return nil, fmt.Errorf("column %q not found in table %q", columnName, tableName) + } + return expandNamedUUID(column, value, uuidMap), nil +} + +func expandNamedUUID(column *ColumnSchema, value any, namedUUIDs map[string]string) any { + var keyType, valType ExtendedType + + switch column.Type { + case TypeUUID: + keyType = column.Type + case TypeSet: + keyType = column.TypeObj.Key.Type + case TypeMap: + keyType = column.TypeObj.Key.Type + valType = column.TypeObj.Value.Type + } + + if valType == TypeUUID { + if m, ok := value.(OvsMap); ok { + for k, v := range m.GoMap { + if newUUID, ok := expandNamedUUIDAtomic(keyType, k, namedUUIDs); ok { + m.GoMap[newUUID] = m.GoMap[k] + delete(m.GoMap, k) + k = newUUID + } + if newUUID, ok := expandNamedUUIDAtomic(valType, v, namedUUIDs); ok { + m.GoMap[k] = newUUID + } + } + } + } else if keyType == TypeUUID { + if ovsSet, ok := value.(OvsSet); ok { + for i, s := range ovsSet.GoSet { + if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { + ovsSet.GoSet[i] = newUUID + } + } + return value + } else if strSet, ok := value.([]string); ok { + for i, s := range strSet { + if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { + strSet[i] = newUUID.(string) + } + } + return value + } else if uuidSet, ok := value.([]UUID); ok { + for i, s := range uuidSet { + if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { + uuidSet[i] = newUUID.(UUID) + } + } + return value + } + + if newUUID, ok := expandNamedUUIDAtomic(keyType, value, namedUUIDs); ok { + return newUUID + } + } + + // No expansion required; return original value + return value +} + +func expandNamedUUIDAtomic(valueType ExtendedType, value any, namedUUIDs map[string]string) (any, bool) { + if valueType == TypeUUID { + if uuid, ok 
:= value.(UUID); ok { + if newUUID, ok := namedUUIDs[uuid.GoUUID]; ok { + return UUID{GoUUID: newUUID}, true + } + } else if uuid, ok := value.(string); ok { + if newUUID, ok := namedUUIDs[uuid]; ok { + return newUUID, true + } + } + } + return value, false +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/notation.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/notation.go new file mode 100644 index 0000000000..39e016b9f3 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/notation.go @@ -0,0 +1,141 @@ +package ovsdb + +import ( + "encoding/json" +) + +const ( + // OperationInsert is an insert operation + OperationInsert = "insert" + // OperationSelect is a select operation + OperationSelect = "select" + // OperationUpdate is an update operation + OperationUpdate = "update" + // OperationMutate is a mutate operation + OperationMutate = "mutate" + // OperationDelete is a delete operation + OperationDelete = "delete" + // OperationWait is a wait operation + OperationWait = "wait" + // OperationCommit is a commit operation + OperationCommit = "commit" + // OperationAbort is an abort operation + OperationAbort = "abort" + // OperationComment is a comment operation + OperationComment = "comment" + // OperationAssert is an assert operation + OperationAssert = "assert" +) + +// Operation represents an operation according to RFC7047 section 5.2 +type Operation struct { + Op string `json:"op"` + Table string `json:"table,omitempty"` + Row Row `json:"row,omitempty"` + Rows []Row `json:"rows,omitempty"` + Columns []string `json:"columns,omitempty"` + Mutations []Mutation `json:"mutations,omitempty"` + Timeout *int `json:"timeout,omitempty"` + Where []Condition `json:"where,omitempty"` + Until string `json:"until,omitempty"` + Durable *bool `json:"durable,omitempty"` + Comment *string `json:"comment,omitempty"` + Lock *string `json:"lock,omitempty"` + UUID string `json:"uuid,omitempty"` + UUIDName string `json:"uuid-name,omitempty"` + + // 
correlationID is a client-side mechanism to correlate a set of operations + // with their results. It is not serialized. + correlationID string `json:"-"` +} + +// MarshalJSON marshalls 'Operation' to a byte array +// For 'select' operations, we don't omit the 'Where' field +// to allow selecting all rows of a table +func (o Operation) MarshalJSON() ([]byte, error) { + type OpAlias Operation + switch o.Op { + case "select": + where := o.Where + if where == nil { + where = make([]Condition, 0) + } + return json.Marshal(&struct { + Where []Condition `json:"where"` + OpAlias + }{ + Where: where, + OpAlias: (OpAlias)(o), + }) + default: + return json.Marshal(&struct { + OpAlias + }{ + OpAlias: (OpAlias)(o), + }) + } +} + +// MonitorRequests represents a group of monitor requests according to RFC7047 +// We cannot use MonitorRequests by inlining the MonitorRequest Map structure till GoLang issue #6213 makes it. +// The only option is to go with raw map[string]any option :-( that sucks ! +// Refer to client.go : MonitorAll() function for more details +type MonitorRequests struct { + Requests map[string]MonitorRequest `json:"requests"` +} + +// MonitorRequest represents a monitor request according to RFC7047 +type MonitorRequest struct { + Columns []string `json:"columns,omitempty"` + Where []Condition `json:"where,omitempty"` + Select *MonitorSelect `json:"select,omitempty"` +} + +// TransactResponse represents the response to a Transact Operation +type TransactResponse struct { + Result []OperationResult `json:"result"` + Error string `json:"error"` +} + +// OperationResult is the result of an Operation +type OperationResult struct { + Count int `json:"count,omitempty"` + Error string `json:"error,omitempty"` + Details string `json:"details,omitempty"` + UUID UUID `json:"uuid,omitempty"` + Rows []Row `json:"rows,omitempty"` +} + +func ovsSliceToGoNotation(val any) (any, error) { + switch sl := val.(type) { + case []any: + bsliced, err := json.Marshal(sl) + if err != nil 
{ + return nil, err + } + switch sl[0] { + case "uuid", "named-uuid": + var uuid UUID + err = json.Unmarshal(bsliced, &uuid) + return uuid, err + case "set": + var oSet OvsSet + err = json.Unmarshal(bsliced, &oSet) + return oSet, err + case "map": + var oMap OvsMap + err = json.Unmarshal(bsliced, &oMap) + return oMap, err + } + return val, nil + } + return val, nil +} + +func GetCorrelationID(op Operation) string { + return op.correlationID +} + +func SetCorrelationID(op *Operation, cid string) { + op.correlationID = cid +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/row.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/row.go new file mode 100644 index 0000000000..809045ea09 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/row.go @@ -0,0 +1,26 @@ +package ovsdb + +import "encoding/json" + +// Row is a table Row according to RFC7047 +type Row map[string]any + +// UnmarshalJSON unmarshalls a byte array to an OVSDB Row +func (r *Row) UnmarshalJSON(b []byte) (err error) { + *r = make(map[string]any) + var raw map[string]any + err = json.Unmarshal(b, &raw) + for key, val := range raw { + val, err = ovsSliceToGoNotation(val) + if err != nil { + return err + } + (*r)[key] = val + } + return err +} + +// NewRow returns a new empty row +func NewRow() Row { + return Row(make(map[string]any)) +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/rpc.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/rpc.go new file mode 100644 index 0000000000..e33480da41 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/rpc.go @@ -0,0 +1,79 @@ +package ovsdb + +const ( + // MonitorRPC is the monitor RPC method + MonitorRPC = "monitor" + // ConditionalMonitorRPC is the monitor_cond + ConditionalMonitorRPC = "monitor_cond" + // ConditionalMonitorSinceRPC is the monitor_cond_since RPC method + ConditionalMonitorSinceRPC = "monitor_cond_since" +) + +// NewEchoArgs creates a new set of arguments for an echo RPC +func 
NewEchoArgs() []any { + return []any{"libovsdb echo"} +} + +// NewGetSchemaArgs creates a new set of arguments for a get_schemas RPC +func NewGetSchemaArgs(schema string) []any { + return []any{schema} +} + +// NewTransactArgs creates a new set of arguments for a transact RPC +func NewTransactArgs(database string, operations ...Operation) []any { + dbSlice := make([]any, 1) + dbSlice[0] = database + + opsSlice := make([]any, len(operations)) + for i, d := range operations { + opsSlice[i] = d + } + + ops := append(dbSlice, opsSlice...) + return ops +} + +// NewCancelArgs creates a new set of arguments for a cancel RPC +func NewCancelArgs(id any) []any { + return []any{id} +} + +// NewMonitorArgs creates a new set of arguments for a monitor RPC +func NewMonitorArgs(database string, value any, requests map[string]MonitorRequest) []any { + return []any{database, value, requests} +} + +// NewMonitorCondSinceArgs creates a new set of arguments for a monitor_cond_since RPC +func NewMonitorCondSinceArgs(database string, value any, requests map[string]MonitorRequest, lastTransactionID string) []any { + return []any{database, value, requests, lastTransactionID} +} + +// NewMonitorCancelArgs creates a new set of arguments for a monitor_cancel RPC +func NewMonitorCancelArgs(value any) []any { + return []any{value} +} + +// NewLockArgs creates a new set of arguments for a lock, steal or unlock RPC +func NewLockArgs(id any) []any { + return []any{id} +} + +// NotificationHandler is the interface that must be implemented to receive notifications +type NotificationHandler interface { + // RFC 7047 section 4.1.6 Update Notification + Update(context any, tableUpdates TableUpdates) + + // ovsdb-server.7 update2 notifications + Update2(context any, tableUpdates TableUpdates2) + + // RFC 7047 section 4.1.9 Locked Notification + Locked([]any) + + // RFC 7047 section 4.1.10 Stolen Notification + Stolen([]any) + + // RFC 7047 section 4.1.11 Echo Notification + Echo([]any) + + 
Disconnected() +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/schema.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/schema.go new file mode 100644 index 0000000000..0c808d1d79 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/schema.go @@ -0,0 +1,661 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "io" + "math" + "os" + "strings" +) + +// DatabaseSchema is a database schema according to RFC7047 +type DatabaseSchema struct { + Name string `json:"name"` + Version string `json:"version"` + Tables map[string]TableSchema `json:"tables"` + allTablesRoot *bool +} + +// UUIDColumn is a static column that represents the _uuid column, common to all tables +var UUIDColumn = ColumnSchema{ + Type: TypeUUID, +} + +// Table returns a TableSchema Schema for a given table and column name +func (schema DatabaseSchema) Table(tableName string) *TableSchema { + if table, ok := schema.Tables[tableName]; ok { + return &table + } + return nil +} + +// IsRoot whether a table is root or not +func (schema DatabaseSchema) IsRoot(tableName string) (bool, error) { + t := schema.Table(tableName) + if t == nil { + return false, fmt.Errorf("Table %s not in schame", tableName) + } + if t.IsRoot { + return true, nil + } + // As per RFC7047, for compatibility with schemas created before + // "isRoot" was introduced, if "isRoot" is omitted or false in every + // in a given , then every table is part + // of the root set. 
+ if schema.allTablesRoot == nil { + allTablesRoot := true + for _, tSchema := range schema.Tables { + if tSchema.IsRoot { + allTablesRoot = false + break + } + } + schema.allTablesRoot = &allTablesRoot + } + return *schema.allTablesRoot, nil +} + +// Print will print the contents of the DatabaseSchema +func (schema DatabaseSchema) Print(w io.Writer) { + fmt.Fprintf(w, "%s, (%s)\n", schema.Name, schema.Version) + for table, tableSchema := range schema.Tables { + fmt.Fprintf(w, "\t %s", table) + if len(tableSchema.Indexes) > 0 { + fmt.Fprintf(w, "(%v)\n", tableSchema.Indexes) + } else { + fmt.Fprintf(w, "\n") + } + for column, columnSchema := range tableSchema.Columns { + fmt.Fprintf(w, "\t\t %s => %s\n", column, columnSchema) + } + } +} + +// SchemaFromFile returns a DatabaseSchema from a file +func SchemaFromFile(f *os.File) (DatabaseSchema, error) { + data, err := io.ReadAll(f) + if err != nil { + return DatabaseSchema{}, err + } + var schema DatabaseSchema + err = json.Unmarshal(data, &schema) + if err != nil { + return DatabaseSchema{}, err + } + return schema, nil +} + +// ValidateOperations performs basic validation for operations against a DatabaseSchema +func (schema DatabaseSchema) ValidateOperations(operations ...Operation) bool { + for _, op := range operations { + switch op.Op { + case OperationAbort, OperationAssert, OperationComment, OperationCommit, OperationWait: + continue + case OperationInsert, OperationSelect, OperationUpdate, OperationMutate, OperationDelete: + table, ok := schema.Tables[op.Table] + if ok { + for column := range op.Row { + if _, ok := table.Columns[column]; !ok { + if column != "_uuid" && column != "_version" { + return false + } + } + } + for _, row := range op.Rows { + for column := range row { + if _, ok := table.Columns[column]; !ok { + if column != "_uuid" && column != "_version" { + return false + } + } + } + } + for _, column := range op.Columns { + if _, ok := table.Columns[column]; !ok { + if column != "_uuid" && 
column != "_version" { + return false + } + } + } + } else { + return false + } + } + } + return true +} + +// TableSchema is a table schema according to RFC7047 +type TableSchema struct { + Columns map[string]*ColumnSchema `json:"columns"` + Indexes [][]string `json:"indexes,omitempty"` + IsRoot bool `json:"isRoot,omitempty"` +} + +// Column returns the Column object for a specific column name +func (t TableSchema) Column(columnName string) *ColumnSchema { + if columnName == "_uuid" { + return &UUIDColumn + } + if column, ok := t.Columns[columnName]; ok { + return column + } + return nil +} + +/*RFC7047 defines some atomic-types (e.g: integer, string, etc). However, the Column's type +can also hold other more complex types such as set, enum and map. The way to determine the type +depends on internal, not directly marshallable fields. Therefore, in order to simplify the usage +of this library, we define an ExtendedType that includes all possible column types (including +atomic fields). +*/ + +// ExtendedType includes atomic types as defined in the RFC plus Enum, Map and Set +type ExtendedType = string + +// RefType is used to define the possible RefTypes +type RefType = string + +// unlimited is not constant as we can't take the address of int constants +var ( + // Unlimited is used to express unlimited "Max" + Unlimited = -1 +) + +const ( + unlimitedString = "unlimited" + //Strong RefType + Strong RefType = "strong" + //Weak RefType + Weak RefType = "weak" + + //ExtendedType associated with Atomic Types + + //TypeInteger is equivalent to 'int' + TypeInteger ExtendedType = "integer" + //TypeReal is equivalent to 'float64' + TypeReal ExtendedType = "real" + //TypeBoolean is equivalent to 'bool' + TypeBoolean ExtendedType = "boolean" + //TypeString is equivalent to 'string' + TypeString ExtendedType = "string" + //TypeUUID is equivalent to 'libovsdb.UUID' + TypeUUID ExtendedType = "uuid" + + //Extended Types used to summarize the internal type of the field. 
+ + //TypeEnum is an enumerator of type defined by Key.Type + TypeEnum ExtendedType = "enum" + //TypeMap is a map whose type depend on Key.Type and Value.Type + TypeMap ExtendedType = "map" + //TypeSet is a set whose type depend on Key.Type + TypeSet ExtendedType = "set" +) + +// BaseType is a base-type structure as per RFC7047 +type BaseType struct { + Type string + Enum []any + minReal *float64 + maxReal *float64 + minInteger *int + maxInteger *int + minLength *int + maxLength *int + refTable *string + refType *RefType +} + +func (b *BaseType) simpleAtomic() bool { + return isAtomicType(b.Type) && b.Enum == nil && b.minReal == nil && b.maxReal == nil && b.minInteger == nil && b.maxInteger == nil && b.minLength == nil && b.maxLength == nil && b.refTable == nil && b.refType == nil +} + +// MinReal returns the minimum real value +// RFC7047 does not define a default, but we assume this to be +// the smallest non zero value a float64 could hold +func (b *BaseType) MinReal() (float64, error) { + if b.Type != TypeReal { + return 0, fmt.Errorf("%s is not a real", b.Type) + } + if b.minReal != nil { + return *b.minReal, nil + } + return math.SmallestNonzeroFloat64, nil +} + +// MaxReal returns the maximum real value +// RFC7047 does not define a default, but this would be the maximum +// value held by a float64 +func (b *BaseType) MaxReal() (float64, error) { + if b.Type != TypeReal { + return 0, fmt.Errorf("%s is not a real", b.Type) + } + if b.maxReal != nil { + return *b.maxReal, nil + } + return math.MaxFloat64, nil +} + +// MinInteger returns the minimum integer value +// RFC7047 specifies the minimum to be -2^63 +func (b *BaseType) MinInteger() (int, error) { + if b.Type != TypeInteger { + return 0, fmt.Errorf("%s is not an integer", b.Type) + } + if b.minInteger != nil { + return *b.minInteger, nil + } + return math.MinInt64, nil +} + +// MaxInteger returns the maximum integer value +// RFC7047 specifies the maximum to be 2^63-1 +func (b *BaseType) MaxInteger() 
(int, error) { + if b.Type != TypeInteger { + return 0, fmt.Errorf("%s is not an integer", b.Type) + } + if b.maxInteger != nil { + return *b.maxInteger, nil + } + return math.MaxInt64, nil +} + +// MinLength returns the minimum string length +// RFC7047 doesn't specify a default, but we assume +// that it must be >= 0 +func (b *BaseType) MinLength() (int, error) { + if b.Type != TypeString { + return 0, fmt.Errorf("%s is not an string", b.Type) + } + if b.minLength != nil { + return *b.minLength, nil + } + return 0, nil +} + +// MaxLength returns the maximum string length +// RFC7047 doesn't specify a default, but we assume +// that it must be 2^63-1 +func (b *BaseType) MaxLength() (int, error) { + if b.Type != TypeString { + return 0, fmt.Errorf("%s is not a string", b.Type) + } + if b.maxLength != nil { + return *b.maxLength, nil + } + return math.MaxInt64, nil +} + +// RefTable returns the table to which a UUID type refers +// It will return an empty string if not set +func (b *BaseType) RefTable() (string, error) { + if b.Type != TypeUUID { + return "", fmt.Errorf("%s is not a uuid", b.Type) + } + if b.refTable != nil { + return *b.refTable, nil + } + return "", nil +} + +// RefType returns the reference type for a UUID field +// RFC7047 infers the RefType is strong if omitted +func (b *BaseType) RefType() (RefType, error) { + if b.Type != TypeUUID { + return "", fmt.Errorf("%s is not a uuid", b.Type) + } + if b.refType != nil { + return *b.refType, nil + } + return Strong, nil +} + +// UnmarshalJSON unmarshals a json-formatted base type +func (b *BaseType) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err == nil { + if isAtomicType(s) { + b.Type = s + } else { + return fmt.Errorf("non atomic type %s in ", s) + } + return nil + } + // temporary type to avoid recursive call to unmarshal + var bt struct { + Type string `json:"type"` + Enum any `json:"enum,omitempty"` + MinReal *float64 `json:"minReal,omitempty"` + 
MaxReal *float64 `json:"maxReal,omitempty"` + MinInteger *int `json:"minInteger,omitempty"` + MaxInteger *int `json:"maxInteger,omitempty"` + MinLength *int `json:"minLength,omitempty"` + MaxLength *int `json:"maxLength,omitempty"` + RefTable *string `json:"refTable,omitempty"` + RefType *RefType `json:"refType,omitempty"` + } + err := json.Unmarshal(data, &bt) + if err != nil { + return err + } + + if bt.Enum != nil { + // 'enum' is a list or a single element representing a list of exactly one element + switch bt.Enum.(type) { + case []any: + // it's an OvsSet + oSet := bt.Enum.([]any) + innerSet := oSet[1].([]any) + b.Enum = make([]any, len(innerSet)) + + // json unmarshal will convert all numeric types to float64, Convert float64 to int if the base type is integer + if bt.Type == "integer" { + for i, v := range innerSet { + if f, ok := v.(float64); ok { + b.Enum[i] = int(f) + } else { + b.Enum[i] = v + } + } + } else { + copy(b.Enum, innerSet) + } + default: + // Single element enum + if bt.Type == "integer" { + if f, ok := bt.Enum.(float64); ok { + b.Enum = []any{int(f)} + } else { + b.Enum = []any{bt.Enum} + } + } else { + b.Enum = []any{bt.Enum} + } + } + } + b.Type = bt.Type + b.minReal = bt.MinReal + b.maxReal = bt.MaxReal + b.minInteger = bt.MinInteger + b.maxInteger = bt.MaxInteger + // Copy/paste fix: minLength must come from the parsed MinLength field, + // not MaxLength — otherwise the schema's minLength is silently dropped. + b.minLength = bt.MinLength + b.maxLength = bt.MaxLength + b.refTable = bt.RefTable + b.refType = bt.RefType + return nil +} + +// MarshalJSON marshals a base type to JSON +func (b BaseType) MarshalJSON() ([]byte, error) { + j := struct { + Type string `json:"type,omitempty"` + Enum *OvsSet `json:"enum,omitempty"` + MinReal *float64 `json:"minReal,omitempty"` + MaxReal *float64 `json:"maxReal,omitempty"` + MinInteger *int `json:"minInteger,omitempty"` + MaxInteger *int `json:"maxInteger,omitempty"` + MinLength *int `json:"minLength,omitempty"` + MaxLength *int `json:"maxLength,omitempty"` + RefTable *string `json:"refTable,omitempty"` + RefType *RefType
`json:"refType,omitempty"` + }{ + Type: b.Type, + MinReal: b.minReal, + MaxReal: b.maxReal, + MinInteger: b.minInteger, + MaxInteger: b.maxInteger, + // Mirror-image of the unmarshal bug: serialize minLength from minLength, + // not maxLength, so a schema survives a marshal/unmarshal round trip. + MinLength: b.minLength, + MaxLength: b.maxLength, + RefTable: b.refTable, + RefType: b.refType, + } + if len(b.Enum) > 0 { + set, err := NewOvsSet(b.Enum) + if err != nil { + return nil, err + } + j.Enum = &set + } + return json.Marshal(j) +} + +// ColumnType is a type object as per RFC7047 +// "key": required +// "value": optional +// "min": optional (default: 1) +// "max": or "unlimited" optional (default: 1) +type ColumnType struct { + Key *BaseType + Value *BaseType + min *int + max *int +} + +// Max returns the maximum value of a ColumnType. -1 is Unlimited +func (c *ColumnType) Max() int { + if c.max == nil { + return 1 + } + return *c.max +} + +// Min returns the minimum value of a ColumnType +func (c *ColumnType) Min() int { + if c.min == nil { + return 1 + } + return *c.min +} + +// UnmarshalJSON unmarshals a json-formatted column type +func (c *ColumnType) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err == nil { + if isAtomicType(s) { + c.Key = &BaseType{Type: s} + } else { + return fmt.Errorf("non atomic type %s in ", s) + } + return nil + } + var colType struct { + Key *BaseType `json:"key"` + Value *BaseType `json:"value"` + Min *int `json:"min"` + Max any `json:"max"` + } + err := json.Unmarshal(data, &colType) + if err != nil { + return err + } + c.Key = colType.Key + c.Value = colType.Value + c.min = colType.Min + switch v := colType.Max.(type) { + case string: + if v == unlimitedString { + c.max = &Unlimited + } else { + return fmt.Errorf("unexpected string value in max field") + } + case float64: + i := int(v) + c.max = &i + default: + c.max = nil + } + return nil +} + +// MarshalJSON marshalls a column type to JSON +func (c ColumnType) MarshalJSON() ([]byte, error) { + if c.Value == nil && c.max == nil && c.min == nil && c.Key.simpleAtomic() { + return
json.Marshal(c.Key.Type) + } + if c.Max() == Unlimited { + colType := struct { + Key *BaseType `json:"key"` + Value *BaseType `json:"value,omitempty"` + Min *int `json:"min,omitempty"` + Max string `json:"max,omitempty"` + }{ + Key: c.Key, + Value: c.Value, + Min: c.min, + Max: unlimitedString, + } + return json.Marshal(&colType) + } + colType := struct { + Key *BaseType `json:"key"` + Value *BaseType `json:"value,omitempty"` + Min *int `json:"min,omitempty"` + Max *int `json:"max,omitempty"` + }{ + Key: c.Key, + Value: c.Value, + Min: c.min, + Max: c.max, + } + return json.Marshal(&colType) +} + +// ColumnSchema is a column schema according to RFC7047 +type ColumnSchema struct { + // According to RFC7047, "type" field can be, either an + // Or a ColumnType defined below. To try to simplify the usage, the + // json message will be parsed manually and Type will indicate the "extended" + // type. Depending on its value, more information may be available in TypeObj. + // E.g: If Type == TypeEnum, TypeObj.Key.Enum contains the possible values + Type ExtendedType + TypeObj *ColumnType + ephemeral *bool + mutable *bool +} + +// Mutable returns whether a column is mutable +func (c *ColumnSchema) Mutable() bool { + if c.mutable != nil { + return *c.mutable + } + // default true + return true +} + +// Ephemeral returns whether a column is ephemeral +func (c *ColumnSchema) Ephemeral() bool { + if c.ephemeral != nil { + return *c.ephemeral + } + // default false + return false +} + +// UnmarshalJSON unmarshals a json-formatted column +func (c *ColumnSchema) UnmarshalJSON(data []byte) error { + // ColumnJSON represents the known json values for a Column + var colJSON struct { + Type *ColumnType `json:"type"` + Ephemeral *bool `json:"ephemeral,omitempty"` + Mutable *bool `json:"mutable,omitempty"` + } + + // Unmarshal known keys + if err := json.Unmarshal(data, &colJSON); err != nil { + return fmt.Errorf("cannot parse column object %s", err) + } + + c.ephemeral = 
colJSON.Ephemeral + c.mutable = colJSON.Mutable + c.TypeObj = colJSON.Type + + // Infer the ExtendedType from the TypeObj + if c.TypeObj.Value != nil { + c.Type = TypeMap + } else if c.TypeObj.Min() != 1 || c.TypeObj.Max() != 1 { + c.Type = TypeSet + } else if len(c.TypeObj.Key.Enum) > 0 { + c.Type = TypeEnum + } else { + c.Type = c.TypeObj.Key.Type + } + return nil +} + +// MarshalJSON marshalls a column schema to JSON +func (c ColumnSchema) MarshalJSON() ([]byte, error) { + type colJSON struct { + Type *ColumnType `json:"type"` + Ephemeral *bool `json:"ephemeral,omitempty"` + Mutable *bool `json:"mutable,omitempty"` + } + column := colJSON{ + Type: c.TypeObj, + Ephemeral: c.ephemeral, + Mutable: c.mutable, + } + return json.Marshal(column) +} + +// String returns a string representation of the (native) column type +func (c *ColumnSchema) String() string { + var flags []string + var flagStr string + var typeStr string + if c.Ephemeral() { + flags = append(flags, "E") + } + if c.Mutable() { + flags = append(flags, "M") + } + if len(flags) > 0 { + flagStr = fmt.Sprintf("[%s]", strings.Join(flags, ",")) + } + + switch c.Type { + case TypeInteger, TypeReal, TypeBoolean, TypeString: + typeStr = string(c.Type) + case TypeUUID: + if c.TypeObj != nil && c.TypeObj.Key != nil { + // ignore err as we've already asserted this is a uuid + reftable, _ := c.TypeObj.Key.RefTable() + reftype := "" + if s, err := c.TypeObj.Key.RefType(); err != nil { + reftype = s + } + typeStr = fmt.Sprintf("uuid [%s (%s)]", reftable, reftype) + } else { + typeStr = "uuid" + } + + case TypeEnum: + typeStr = fmt.Sprintf("enum (type: %s): %v", c.TypeObj.Key.Type, c.TypeObj.Key.Enum) + case TypeMap: + typeStr = fmt.Sprintf("[%s]%s", c.TypeObj.Key.Type, c.TypeObj.Value.Type) + case TypeSet: + var keyStr string + if c.TypeObj.Key.Type == TypeUUID { + // ignore err as we've already asserted this is a uuid + reftable, _ := c.TypeObj.Key.RefTable() + reftype, _ := c.TypeObj.Key.RefType() + keyStr = 
fmt.Sprintf(" [%s (%s)]", reftable, reftype) + } else { + keyStr = string(c.TypeObj.Key.Type) + } + typeStr = fmt.Sprintf("[]%s (min: %d, max: %d)", keyStr, c.TypeObj.Min(), c.TypeObj.Max()) + default: + panic(fmt.Sprintf("Unsupported type %s", c.Type)) + } + + return strings.Join([]string{typeStr, flagStr}, " ") +} + +func isAtomicType(atype string) bool { + switch atype { + case TypeInteger, TypeReal, TypeBoolean, TypeString, TypeUUID: + return true + default: + return false + } +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/.gitignore b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/.gitignore new file mode 100644 index 0000000000..33f8bff56f --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/.gitignore @@ -0,0 +1 @@ +*.ovsschema \ No newline at end of file diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/database.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/database.go new file mode 100644 index 0000000000..d3eb662898 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/database.go @@ -0,0 +1,182 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package serverdb + +import "github.com/ovn-kubernetes/libovsdb/model" + +const DatabaseTable = "Database" + +type ( + DatabaseModel = string +) + +var ( + DatabaseModelStandalone DatabaseModel = "standalone" + DatabaseModelClustered DatabaseModel = "clustered" + DatabaseModelRelay DatabaseModel = "relay" +) + +// Database defines an object in Database table +type Database struct { + UUID string `ovsdb:"_uuid"` + Cid *string `ovsdb:"cid"` + Connected bool `ovsdb:"connected"` + Index *int `ovsdb:"index"` + Leader bool `ovsdb:"leader"` + Model DatabaseModel `ovsdb:"model" validate:"oneof='standalone' 'clustered' 'relay'"` + Name string `ovsdb:"name"` + Schema *string `ovsdb:"schema"` + Sid *string `ovsdb:"sid"` +} + +func (a *Database) GetUUID() string { + return a.UUID +} + +func (a *Database) GetCid() *string { + return a.Cid +} + +func copyDatabaseCid(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseCid(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Database) GetConnected() bool { + return a.Connected +} + +func (a *Database) GetIndex() *int { + return a.Index +} + +func copyDatabaseIndex(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseIndex(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Database) GetLeader() bool { + return a.Leader +} + +func (a *Database) GetModel() DatabaseModel { + return a.Model +} + +func (a *Database) GetName() string { + return a.Name +} + +func (a *Database) GetSchema() *string { + return a.Schema +} + +func copyDatabaseSchema(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseSchema(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a 
*Database) GetSid() *string { + return a.Sid +} + +func copyDatabaseSid(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseSid(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Database) DeepCopyInto(b *Database) { + *b = *a + b.Cid = copyDatabaseCid(a.Cid) + b.Index = copyDatabaseIndex(a.Index) + b.Schema = copyDatabaseSchema(a.Schema) + b.Sid = copyDatabaseSid(a.Sid) +} + +func (a *Database) DeepCopy() *Database { + b := new(Database) + a.DeepCopyInto(b) + return b +} + +func (a *Database) CloneModelInto(b model.Model) { + c := b.(*Database) + a.DeepCopyInto(c) +} + +func (a *Database) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Database) Equals(b *Database) bool { + return a.UUID == b.UUID && + equalDatabaseCid(a.Cid, b.Cid) && + a.Connected == b.Connected && + equalDatabaseIndex(a.Index, b.Index) && + a.Leader == b.Leader && + a.Model == b.Model && + a.Name == b.Name && + equalDatabaseSchema(a.Schema, b.Schema) && + equalDatabaseSid(a.Sid, b.Sid) +} + +func (a *Database) EqualsModel(b model.Model) bool { + c := b.(*Database) + return a.Equals(c) +} + +var _ model.CloneableModel = &Database{} +var _ model.ComparableModel = &Database{} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/gen.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/gen.go new file mode 100644 index 0000000000..5923af60ab --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/gen.go @@ -0,0 +1,6 @@ +package serverdb + +// server_model is a database model for the special _Server database that all +// ovsdb instances export. It reports back status of the server process itself. + +//go:generate ../../bin/modelgen --extended -p serverdb -o . 
_server.ovsschema diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/model.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/model.go new file mode 100644 index 0000000000..c0aeeb74c3 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/model.go @@ -0,0 +1,99 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package serverdb + +import ( + "encoding/json" + + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb +func FullDatabaseModel() (model.ClientDBModel, error) { + return model.NewClientDBModel("_Server", map[string]model.Model{ + "Database": &Database{}, + }) +} + +var schema = `{ + "name": "_Server", + "version": "1.2.0", + "tables": { + "Database": { + "columns": { + "cid": { + "type": { + "key": { + "type": "uuid" + }, + "min": 0, + "max": 1 + } + }, + "connected": { + "type": "boolean" + }, + "index": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "leader": { + "type": "boolean" + }, + "model": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "standalone", + "clustered", + "relay" + ] + ] + } + } + }, + "name": { + "type": "string" + }, + "schema": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "sid": { + "type": { + "key": { + "type": "uuid" + }, + "min": 0, + "max": 1 + } + } + }, + "isRoot": true + } + } +}` + +func Schema() ovsdb.DatabaseSchema { + var s ovsdb.DatabaseSchema + err := json.Unmarshal([]byte(schema), &s) + if err != nil { + panic(err) + } + return s +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/set.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/set.go new file mode 100644 index 0000000000..07dafaa6a6 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/set.go @@ -0,0 +1,109 @@ +package ovsdb + +import ( + "encoding/json" + 
"fmt" + "reflect" +) + +// OvsSet is an OVSDB style set +// RFC 7047 has a weird (but understandable) notation for set as described as : +// Either an , representing a set with exactly one element, or +// a 2-element JSON array that represents a database set value. The +// first element of the array must be the string "set", and the +// second element must be an array of zero or more s giving the +// values in the set. All of the s must have the same type. +type OvsSet struct { + GoSet []any +} + +// NewOvsSet creates a new OVSDB style set from a Go interface (object) +func NewOvsSet(obj any) (OvsSet, error) { + ovsSet := make([]any, 0) + var v reflect.Value + if reflect.TypeOf(obj).Kind() == reflect.Ptr { + v = reflect.ValueOf(obj).Elem() + if v.Kind() == reflect.Invalid { + // must be a nil pointer, so just return an empty set + return OvsSet{ovsSet}, nil + } + } else { + v = reflect.ValueOf(obj) + } + + switch v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + ovsSet = append(ovsSet, v.Index(i).Interface()) + } + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.Bool: + ovsSet = append(ovsSet, v.Interface()) + case reflect.Struct: + if v.Type() == reflect.TypeOf(UUID{}) { + ovsSet = append(ovsSet, v.Interface()) + } else { + return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types") + } + default: + return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types") + } + return OvsSet{ovsSet}, nil +} + +// MarshalJSON wil marshal an OVSDB style Set in to a JSON byte array +func (o OvsSet) MarshalJSON() ([]byte, error) { + switch l := len(o.GoSet); { + case l == 1: + return json.Marshal(o.GoSet[0]) + case l > 0: + var oSet []any + oSet = append(oSet, "set") + oSet = append(oSet, o.GoSet) + 
return json.Marshal(oSet) + } + return []byte("[\"set\",[]]"), nil +} + +// UnmarshalJSON will unmarshal a JSON byte array to an OVSDB style Set +func (o *OvsSet) UnmarshalJSON(b []byte) (err error) { + o.GoSet = make([]any, 0) + addToSet := func(o *OvsSet, v any) error { + goVal, err := ovsSliceToGoNotation(v) + if err == nil { + o.GoSet = append(o.GoSet, goVal) + } + return err + } + + var inter any + if err = json.Unmarshal(b, &inter); err != nil { + return err + } + switch inter.(type) { + case []any: + var oSet []any + oSet = inter.([]any) + // it's a single uuid object + if len(oSet) == 2 && (oSet[0] == "uuid" || oSet[0] == "named-uuid") { + return addToSet(o, UUID{GoUUID: oSet[1].(string)}) + } + if oSet[0] != "set" { + // it is a slice, but is not a set + return &json.UnmarshalTypeError{Value: reflect.ValueOf(inter).String(), Type: reflect.TypeOf(*o)} + } + innerSet := oSet[1].([]any) + for _, val := range innerSet { + err := addToSet(o, val) + if err != nil { + return err + } + } + return err + default: + // it is a single object + return addToSet(o, inter) + } +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/update3.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/update3.go new file mode 100644 index 0000000000..0766fda08c --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/update3.go @@ -0,0 +1,51 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" +) + +type MonitorCondSinceReply struct { + Found bool + LastTransactionID string + Updates TableUpdates2 +} + +func (m MonitorCondSinceReply) MarshalJSON() ([]byte, error) { + v := []any{m.Found, m.LastTransactionID, m.Updates} + return json.Marshal(v) +} + +func (m *MonitorCondSinceReply) UnmarshalJSON(b []byte) error { + var v []json.RawMessage + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + if len(v) != 3 { + return fmt.Errorf("expected a 3 element json array. 
there are %d elements", len(v)) + } + + var found bool + err = json.Unmarshal(v[0], &found) + if err != nil { + return err + } + + var lastTransactionID string + err = json.Unmarshal(v[1], &lastTransactionID) + if err != nil { + return err + } + + var updates TableUpdates2 + err = json.Unmarshal(v[2], &updates) + if err != nil { + return err + } + + m.Found = found + m.LastTransactionID = lastTransactionID + m.Updates = updates + return nil +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates.go new file mode 100644 index 0000000000..5a47d0c44a --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates.go @@ -0,0 +1,35 @@ +package ovsdb + +// TableUpdates is an object that maps from a table name to a +// TableUpdate +type TableUpdates map[string]TableUpdate + +// TableUpdate is an object that maps from the row's UUID to a +// RowUpdate +type TableUpdate map[string]*RowUpdate + +// RowUpdate represents a row update according to RFC7047 +type RowUpdate struct { + New *Row `json:"new,omitempty"` + Old *Row `json:"old,omitempty"` +} + +// Insert returns true if this is an update for an insert operation +func (r RowUpdate) Insert() bool { + return r.New != nil && r.Old == nil +} + +// Modify returns true if this is an update for a modify operation +func (r RowUpdate) Modify() bool { + return r.New != nil && r.Old != nil +} + +// Delete returns true if this is an update for a delete operation +func (r RowUpdate) Delete() bool { + return r.New == nil && r.Old != nil +} + +func (r *RowUpdate) FromRowUpdate2(ru2 RowUpdate2) { + r.Old = ru2.Old + r.New = ru2.New +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates2.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates2.go new file mode 100644 index 0000000000..a040894c97 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates2.go @@ -0,0 +1,19 @@ +package ovsdb + +// TableUpdates2 is 
an object that maps from a table name to a +// TableUpdate2 +type TableUpdates2 map[string]TableUpdate2 + +// TableUpdate2 is an object that maps from the row's UUID to a +// RowUpdate2 +type TableUpdate2 map[string]*RowUpdate2 + +// RowUpdate2 represents a row update according to ovsdb-server.7 +type RowUpdate2 struct { + Initial *Row `json:"initial,omitempty"` + Insert *Row `json:"insert,omitempty"` + Modify *Row `json:"modify,omitempty"` + Delete *Row `json:"delete,omitempty"` + Old *Row `json:"-"` + New *Row `json:"-"` +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/uuid.go b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/uuid.go new file mode 100644 index 0000000000..6bc4636537 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/uuid.go @@ -0,0 +1,59 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "regexp" +) + +var validUUID = regexp.MustCompile(`^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`) + +// UUID is a UUID according to RFC7047 +type UUID struct { + GoUUID string `json:"uuid"` +} + +// MarshalJSON will marshal an OVSDB style UUID to a JSON encoded byte array +func (u UUID) MarshalJSON() ([]byte, error) { + var uuidSlice []string + err := ValidateUUID(u.GoUUID) + if err == nil { + uuidSlice = []string{"uuid", u.GoUUID} + } else { + uuidSlice = []string{"named-uuid", u.GoUUID} + } + + return json.Marshal(uuidSlice) +} + +// UnmarshalJSON will unmarshal a JSON encoded byte array to a OVSDB style UUID +func (u *UUID) UnmarshalJSON(b []byte) (err error) { + var ovsUUID []string + if err := json.Unmarshal(b, &ovsUUID); err == nil { + u.GoUUID = ovsUUID[1] + } + return err +} + +func ValidateUUID(uuid string) error { + if len(uuid) != 36 { + return fmt.Errorf("uuid exceeds 36 characters") + } + + if !validUUID.MatchString(uuid) { + return fmt.Errorf("uuid does not match regexp") + } + + return nil +} + +func IsNamedUUID(uuid string) bool { + return len(uuid) > 0 && !validUUID.MatchString(uuid) +} + 
+func IsValidUUID(uuid string) bool { + if err := ValidateUUID(uuid); err != nil { + return false + } + return true +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/updates/difference.go b/vendor/github.com/ovn-kubernetes/libovsdb/updates/difference.go new file mode 100644 index 0000000000..f78a9ce628 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/updates/difference.go @@ -0,0 +1,209 @@ +package updates + +import "reflect" + +// difference between value 'a' and value 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the +// difference is 'b' in which case 'b' is returned unmodified. Also returns a +// boolean indicating if there is an actual difference. +func difference(a, b any) (any, bool) { + return mergeDifference(nil, a, b) +} + +// applyDifference returns the result of applying difference 'd' to value 'v' +// along with a boolean indicating if 'v' was changed. 
+func applyDifference(v, d any) (any, bool) { + if d == nil { + return v, false + } + // difference can be applied with the same algorithm used to calculate it + // f(x,f(x,y))=y + result, changed := difference(v, d) + dv := reflect.ValueOf(d) + switch dv.Kind() { + case reflect.Slice: + fallthrough + case reflect.Map: + // but we need to tweak the interpretation of change for map and slices: + // when there is no difference between the value and non-empty delta, it + // actually means the value needs to be emptied so there is actually a + // change + if !changed && dv.Len() > 0 { + return result, true + } + // there are no changes when delta is empty + return result, changed && dv.Len() > 0 + } + return result, changed +} + +// mergeDifference, given an original value 'o' and two differences 'a' and 'b', +// returns a new equivalent difference that when applied on 'o' it would have +// the same result as applying 'a' and 'b' consecutively. +// If 'o' is nil, returns the difference between 'a' and 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the result is +// 'b' in which case 'b' is returned unmodified. Also returns a boolean +// indicating if there is an actual difference. +func mergeDifference(o, a, b any) (any, bool) { + kind := reflect.ValueOf(b).Kind() + if kind == reflect.Invalid { + kind = reflect.ValueOf(a).Kind() + } + switch kind { + case reflect.Invalid: + return nil, false + case reflect.Slice: + // set differences are transitive + return setDifference(a, b) + case reflect.Map: + return mergeMapDifference(o, a, b) + case reflect.Array: + panic("Not implemented") + default: + return mergeAtomicDifference(o, a, b) + } +} + +// setDifference calculates the difference between set 'a' and set 'b'. 
+// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the difference +// is 'b' in which case 'b' is returned unmodified. Also returns a boolean +// indicating if there is an actual difference. +func setDifference(a, b any) (any, bool) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + if !av.IsValid() && !bv.IsValid() { + return nil, false + } else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() { + return b, bv.Len() != 0 + } else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() { + return a, av.Len() != 0 + } + + // From https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification + // The difference between two sets are all elements that only belong to one + // of the sets. + difference := make(map[any]struct{}, bv.Len()) + for i := 0; i < bv.Len(); i++ { + // supossedly we are working with comparable atomic types with no + // pointers so we can use the values as map key + difference[bv.Index(i).Interface()] = struct{}{} + } + j := av.Len() + for i := 0; i < j; { + vv := av.Index(i) + vi := vv.Interface() + if _, ok := difference[vi]; ok { + // this value of 'a' is in 'b', so remove it from 'a'; to do that, + // overwrite it with the last value and re-evaluate + vv.Set(av.Index(j - 1)) + // decrease where the last 'a' value is at + j-- + // remove from 'b' values + delete(difference, vi) + } else { + // this value of 'a' is not in 'b', evaluate the next value + i++ + } + } + // trim the slice to the actual values held + av = av.Slice(0, j) + for item := range difference { + // this value of 'b' is not in 'a', so add it + av = reflect.Append(av, reflect.ValueOf(item)) + } + + if av.Len() == 0 { + return reflect.Zero(av.Type()).Interface(), false + } + + return av.Interface(), true +} + +// mergeMapDifference, given an original map 'o' and two differences 'a' and +// 'b', returns a new 
equivalent difference that when applied on 'o' it would +// have the same result as applying 'a' and 'b' consecutively. +// If 'o' is nil, returns the difference between 'a' and 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the result is +// 'b' in which case 'b' is returned unmodified. +// Returns a boolean indicating if there is an actual difference. +func mergeMapDifference(o, a, b any) (any, bool) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + if !av.IsValid() && !bv.IsValid() { + return nil, false + } else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() { + return b, bv.Len() != 0 + } else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() { + return a, av.Len() != 0 + } + + ov := reflect.ValueOf(o) + if !ov.IsValid() { + ov = reflect.Zero(av.Type()) + } + + // From + // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification + // The difference between two maps are all key-value pairs whose keys + // appears in only one of the maps, plus the key-value pairs whose keys + // appear in both maps but with different values. For the latter elements, + // includes the value from the new column. + + // We can assume that difference is a transitive operation so we calculate + // the difference between 'a' and 'b' but we need to handle exceptions when + // the same key is present in all values. 
+ for i := bv.MapRange(); i.Next(); { + kv := i.Key() + bvv := i.Value() + avv := av.MapIndex(kv) + ovv := ov.MapIndex(kv) + // supossedly we are working with comparable types with no pointers so + // we can compare directly here + switch { + case ovv.IsValid() && avv.IsValid() && ovv.Interface() == bvv.Interface(): + // key is present in the three values + // final result would restore key to the original value, delete from 'a' + av.SetMapIndex(kv, reflect.Value{}) + case ovv.IsValid() && avv.IsValid() && avv.Interface() == bvv.Interface(): + // key is present in the three values + // final result would remove key, set in 'a' with 'o' value + av.SetMapIndex(kv, ovv) + case avv.IsValid() && avv.Interface() == bvv.Interface(): + // key/value is in 'a' and 'b', delete from 'a' + av.SetMapIndex(kv, reflect.Value{}) + default: + // key/value in 'b' is not in 'a', set in 'a' with 'b' value + av.SetMapIndex(kv, bvv) + } + } + + if av.Len() == 0 { + return reflect.Zero(av.Type()).Interface(), false + } + + return av.Interface(), true +} + +// mergeAtomicDifference, given an original atomic value 'o' and two differences +// 'a' and 'b', returns a new equivalent difference that when applied on 'o' it +// would have the same result as applying 'a' and 'b' consecutively. +// If 'o' is nil, returns the difference between 'a' and 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// Returns a boolean indicating if there is an actual difference. 
+func mergeAtomicDifference(o, a, b any) (any, bool) { + if o != nil { + return b, !reflect.DeepEqual(o, b) + } + return b, !reflect.DeepEqual(a, b) +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/updates/doc.go b/vendor/github.com/ovn-kubernetes/libovsdb/updates/doc.go new file mode 100644 index 0000000000..3e6fe18a0b --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/updates/doc.go @@ -0,0 +1,15 @@ +/* +Package updates provides an utility to perform and aggregate model updates. + +As input, it supports OVSDB Operations, RowUpdate or RowUpdate2 notations via +the corresponding Add methods. + +As output, it supports both OVSDB RowUpdate2 as well as model notation via the +corresponding ForEach iterative methods. + +Several updates can be added and will be merged with any previous updates even +if they are for the same model. If several updates for the same model are +aggregated, the user is responsible that the provided model to be updated +matches the updated model of the previous update. 
+*/ +package updates diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/updates/merge.go b/vendor/github.com/ovn-kubernetes/libovsdb/updates/merge.go new file mode 100644 index 0000000000..a9eeb6d72e --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/updates/merge.go @@ -0,0 +1,160 @@ +package updates + +import ( + "fmt" + "reflect" + + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +func merge(ts *ovsdb.TableSchema, a, b modelUpdate) (modelUpdate, error) { + // handle model update + switch { + case b.old == nil && b.new == nil: + // noop + case a.old == nil && a.new == nil: + // first op + a.old = b.old + a.new = b.new + case a.new != nil && b.old != nil && b.new != nil: + // update after an insert or an update + a.new = b.new + case b.old != nil && b.new == nil: + // a final delete + a.new = nil + default: + return modelUpdate{}, fmt.Errorf("sequence of updates not supported") + } + + // handle row update + ru2, err := mergeRowUpdate(ts, a.rowUpdate2, b.rowUpdate2) + if err != nil { + return modelUpdate{}, err + } + if ru2 == nil { + return modelUpdate{}, nil + } + a.rowUpdate2 = ru2 + + return a, nil +} + +func mergeRowUpdate(ts *ovsdb.TableSchema, a, b *rowUpdate2) (*rowUpdate2, error) { + switch { + case b == nil: + // noop + case a == nil: + // first op + a = b + case a.Insert != nil && b.Modify != nil: + // update after an insert + a.New = b.New + a.Insert = b.New + case a.Modify != nil && b.Modify != nil: + // update after update + a.New = b.New + a.Modify = mergeModifyRow(ts, a.Old, a.Modify, b.Modify) + if a.Modify == nil { + // we merged two modifications that brought back the row to its + // original value which is a no op + a = nil + } + case a.Insert != nil && b.Delete != nil: + // delete after insert + a = nil + case b.Delete != nil: + // a final delete + a.Initial = nil + a.Insert = nil + a.Modify = nil + a.New = nil + a.Delete = b.Delete + default: + return &rowUpdate2{}, fmt.Errorf("sequence of updates not supported") + } + return 
a, nil +} + +// mergeModifyRow merges two modification rows 'a' and 'b' with respect an +// original row 'o'. Two modifications that restore the original value cancel +// each other and won't be included in the result. Returns nil if there are no +// resulting modifications. +func mergeModifyRow(ts *ovsdb.TableSchema, o, a, b *ovsdb.Row) *ovsdb.Row { + original := *o + aMod := *a + bMod := *b + for k, v := range bMod { + if _, ok := aMod[k]; !ok { + aMod[k] = v + continue + } + + var result any + var changed bool + + // handle maps or sets first + switch v.(type) { + // difference only supports set or map values that are comparable with + // no pointers. This should be currently fine because the set or map + // values should only be non pointer atomic types or the UUID struct. + case ovsdb.OvsSet: + aSet := aMod[k].(ovsdb.OvsSet) + bSet := v.(ovsdb.OvsSet) + // handle sets of multiple values, single value sets are handled as + // atomic values + if ts.Column(k).TypeObj.Max() != 1 { + // set difference is a fully transitive operation so we dont + // need to do anything special to merge two differences + result, changed = setDifference(aSet.GoSet, bSet.GoSet) + result = ovsdb.OvsSet{GoSet: result.([]any)} + } + case ovsdb.OvsMap: + aMap := aMod[k].(ovsdb.OvsMap) + bMap := v.(ovsdb.OvsMap) + var originalMap ovsdb.OvsMap + if v, ok := original[k]; ok { + originalMap = v.(ovsdb.OvsMap) + } + // map difference is not transitive with respect to the original + // value so we have to take the original value into account when + // merging + result, changed = mergeMapDifference(originalMap.GoMap, aMap.GoMap, bMap.GoMap) + result = ovsdb.OvsMap{GoMap: result.(map[any]any)} + } + + // was neither a map nor a set + if result == nil { + // atomic difference is not transitive with respect to the original + // value so we have to take the original value into account when + // merging + o := original[k] + if o == nil { + // assume zero value if original does not have the column + o 
= reflect.Zero(reflect.TypeOf(v)).Interface() + } + if set, ok := o.(ovsdb.OvsSet); ok { + // atomic optional values are cleared out with an empty set + // if the original value was also cleared out, use an empty set + // instead of a nil set so that mergeAtomicDifference notices + // that we are returning to the original value + if set.GoSet == nil { + set.GoSet = []any{} + } + o = set + } + result, changed = mergeAtomicDifference(o, aMod[k], v) + } + + if !changed { + delete(aMod, k) + continue + } + aMod[k] = result + } + + if len(aMod) == 0 { + return nil + } + + return a +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/updates/mutate.go b/vendor/github.com/ovn-kubernetes/libovsdb/updates/mutate.go new file mode 100644 index 0000000000..57097f55c8 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/updates/mutate.go @@ -0,0 +1,297 @@ +package updates + +import ( + "reflect" + + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +func removeFromSlice(a, b reflect.Value) (reflect.Value, bool) { + for i := 0; i < a.Len(); i++ { + if a.Index(i).Interface() == b.Interface() { + v := reflect.AppendSlice(a.Slice(0, i), a.Slice(i+1, a.Len())) + return v, true + } + } + return a, false +} + +func insertToSlice(a, b reflect.Value) (reflect.Value, bool) { + for i := 0; i < a.Len(); i++ { + if a.Index(i).Interface() == b.Interface() { + return a, false + } + } + return reflect.Append(a, b), true +} + +func mutate(current any, mutator ovsdb.Mutator, value any) (any, any) { + switch current.(type) { + case bool, string: + return current, value + } + switch mutator { + case ovsdb.MutateOperationInsert: + // for insert, the delta will be the new value added + return mutateInsert(current, value) + case ovsdb.MutateOperationDelete: + return mutateDelete(current, value) + case ovsdb.MutateOperationAdd: + // for add, the delta is the new value + newValue := mutateAdd(current, value) + return newValue, newValue + case ovsdb.MutateOperationSubtract: + // for 
subtract, the delta is the new value + newValue := mutateSubtract(current, value) + return newValue, newValue + case ovsdb.MutateOperationMultiply: + newValue := mutateMultiply(current, value) + return newValue, newValue + case ovsdb.MutateOperationDivide: + newValue := mutateDivide(current, value) + return newValue, newValue + case ovsdb.MutateOperationModulo: + newValue := mutateModulo(current, value) + return newValue, newValue + } + return current, value +} + +func mutateInsert(current, value any) (any, any) { + switch current.(type) { + case int, float64: + return current, current + } + vc := reflect.ValueOf(current) + vv := reflect.ValueOf(value) + if vc.Kind() == reflect.Slice && vc.Type() == reflect.SliceOf(vv.Type()) { + v, ok := insertToSlice(vc, vv) + var diff any + if ok { + diff = value + } + return v.Interface(), diff + } + if !vc.IsValid() { + if vv.IsValid() { + return vv.Interface(), vv.Interface() + } + return nil, nil + } + if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice { + v := vc + diff := reflect.Indirect(reflect.New(vv.Type())) + for i := 0; i < vv.Len(); i++ { + var ok bool + v, ok = insertToSlice(v, vv.Index(i)) + if ok { + diff = reflect.Append(diff, vv.Index(i)) + } + } + if diff.Len() > 0 { + return v.Interface(), diff.Interface() + } + return v.Interface(), nil + } + if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map { + if vc.IsNil() && vv.Len() > 0 { + return value, value + } + diff := reflect.MakeMap(vc.Type()) + iter := vv.MapRange() + for iter.Next() { + k := iter.Key() + if !vc.MapIndex(k).IsValid() { + vc.SetMapIndex(k, iter.Value()) + diff.SetMapIndex(k, iter.Value()) + } + } + if diff.Len() > 0 { + return current, diff.Interface() + } + return current, nil + } + return current, nil +} + +func mutateDelete(current, value any) (any, any) { + switch current.(type) { + case int, float64: + return current, nil + } + vc := reflect.ValueOf(current) + vv := reflect.ValueOf(value) + if vc.Kind() == reflect.Slice && 
vc.Type() == reflect.SliceOf(vv.Type()) { + v, ok := removeFromSlice(vc, vv) + diff := value + if !ok { + diff = nil + } + return v.Interface(), diff + } + if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice { + v := vc + diff := reflect.Indirect(reflect.New(vv.Type())) + for i := 0; i < vv.Len(); i++ { + var ok bool + v, ok = removeFromSlice(v, vv.Index(i)) + if ok { + diff = reflect.Append(diff, vv.Index(i)) + } + } + if diff.Len() > 0 { + return v.Interface(), diff.Interface() + } + return v.Interface(), nil + } + if vc.Kind() == reflect.Map && vv.Type() == reflect.SliceOf(vc.Type().Key()) { + diff := reflect.MakeMap(vc.Type()) + for i := 0; i < vv.Len(); i++ { + if vc.MapIndex(vv.Index(i)).IsValid() { + diff.SetMapIndex(vv.Index(i), vc.MapIndex(vv.Index(i))) + vc.SetMapIndex(vv.Index(i), reflect.Value{}) + } + } + if diff.Len() > 0 { + return current, diff.Interface() + } + return current, nil + } + if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map { + diff := reflect.MakeMap(vc.Type()) + iter := vv.MapRange() + for iter.Next() { + vvk := iter.Key() + vvv := iter.Value() + vcv := vc.MapIndex(vvk) + if vcv.IsValid() && reflect.DeepEqual(vcv.Interface(), vvv.Interface()) { + diff.SetMapIndex(vvk, vcv) + vc.SetMapIndex(vvk, reflect.Value{}) + } + } + if diff.Len() > 0 { + return current, diff.Interface() + } + return current, nil + } + return current, nil +} + +func mutateAdd(current, value any) any { + if i, ok := current.(int); ok { + v := value.(int) + return i + v + } + if i, ok := current.(float64); ok { + v := value.(float64) + return i + v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j + v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j + v + } + return is + } + return current +} + +func mutateSubtract(current, value any) any { + if i, ok := current.(int); ok { + v := value.(int) + return i - v + } + if i, ok := 
current.(float64); ok { + v := value.(float64) + return i - v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j - v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j - v + } + return is + } + return current +} + +func mutateMultiply(current, value any) any { + if i, ok := current.(int); ok { + v := value.(int) + return i * v + } + if i, ok := current.(float64); ok { + v := value.(float64) + return i * v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j * v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j * v + } + return is + } + return current +} + +func mutateDivide(current, value any) any { + if i, ok := current.(int); ok { + v := value.(int) + return i / v + } + if i, ok := current.(float64); ok { + v := value.(float64) + return i / v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j / v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j / v + } + return is + } + return current +} + +func mutateModulo(current, value any) any { + if i, ok := current.(int); ok { + v := value.(int) + return i % v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j % v + } + return is + } + return current +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/updates/references.go b/vendor/github.com/ovn-kubernetes/libovsdb/updates/references.go new file mode 100644 index 0000000000..cc648e1023 --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/updates/references.go @@ -0,0 +1,797 @@ +package updates + +import ( + "fmt" + + "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +// 
ReferenceProvider should be implemented by a database that tracks references +type ReferenceProvider interface { + // GetReferences provides the references to the provided row + GetReferences(database, table, uuid string) (database.References, error) + // Get provides the corresponding model + Get(database, table string, uuid string) (model.Model, error) +} + +// DatabaseUpdate bundles updates together with the updated +// reference information +type DatabaseUpdate struct { + ModelUpdates + referenceUpdates database.References +} + +func (u DatabaseUpdate) ForReferenceUpdates(do func(references database.References) error) error { + refsCopy := database.References{} + // since refsCopy is empty, this will just copy everything + applyReferenceModifications(refsCopy, u.referenceUpdates) + return do(refsCopy) +} + +func NewDatabaseUpdate(updates ModelUpdates, references database.References) DatabaseUpdate { + return DatabaseUpdate{ + ModelUpdates: updates, + referenceUpdates: references, + } +} + +// ProcessReferences tracks referential integrity for the provided set of +// updates. It returns an updated set of updates which includes additional +// updates and updated references as a result of the reference garbage +// collection described in RFC7047. These additional updates resulting from the +// reference garbage collection are also returned separately. Any constraint or +// referential integrity violation is returned as an error. 
+func ProcessReferences(dbModel model.DatabaseModel, provider ReferenceProvider, updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) { + referenceTracker := newReferenceTracker(dbModel, provider) + return referenceTracker.processReferences(updates) +} + +type referenceTracker struct { + dbModel model.DatabaseModel + provider ReferenceProvider + + // updates that are being processed + updates ModelUpdates + + // references are the updated references by the set of updates processed + references database.References + + // helper maps to track the rows that we are processing and their tables + tracked map[string]string + added map[string]string + deleted map[string]string +} + +func newReferenceTracker(dbModel model.DatabaseModel, provider ReferenceProvider) *referenceTracker { + return &referenceTracker{ + dbModel: dbModel, + provider: provider, + } +} + +func (rt *referenceTracker) processReferences(updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) { + rt.updates = updates + rt.tracked = make(map[string]string) + rt.added = make(map[string]string) + rt.deleted = make(map[string]string) + rt.references = make(database.References) + + referenceUpdates, err := rt.processReferencesLoop(updates) + if err != nil { + return ModelUpdates{}, ModelUpdates{}, nil, err + } + + // merge the updates generated from reference tracking into the main updates + err = updates.Merge(rt.dbModel, referenceUpdates) + if err != nil { + return ModelUpdates{}, ModelUpdates{}, nil, err + } + + return updates, referenceUpdates, rt.references, nil +} + +func (rt *referenceTracker) processReferencesLoop(updates ModelUpdates) (ModelUpdates, error) { + referenceUpdates := ModelUpdates{} + + // references can be transitive and deleting them can lead to further + // references having to be removed so loop until there are no updates to be + // made + for len(updates.updates) > 0 { + // update the references from the updates + err := 
rt.processModelUpdates(updates) + if err != nil { + return ModelUpdates{}, err + } + + // process strong reference integrity + updates, err = rt.processStrongReferences() + if err != nil { + return ModelUpdates{}, err + } + + // process weak reference integrity + weakUpdates, err := rt.processWeakReferences() + if err != nil { + return ModelUpdates{}, err + } + + // merge strong and weak reference updates + err = updates.Merge(rt.dbModel, weakUpdates) + if err != nil { + return ModelUpdates{}, err + } + + // merge updates from this iteration to the overall reference updates + err = referenceUpdates.Merge(rt.dbModel, updates) + if err != nil { + return ModelUpdates{}, err + } + } + + return referenceUpdates, nil +} + +// processModelUpdates keeps track of the updated references by a set of updates +func (rt *referenceTracker) processModelUpdates(updates ModelUpdates) error { + tables := updates.GetUpdatedTables() + for _, table := range tables { + err := updates.ForEachRowUpdate(table, func(uuid string, row ovsdb.RowUpdate2) error { + return rt.processRowUpdate(table, uuid, &row) + }) + if err != nil { + return err + } + } + return nil +} + +// processRowUpdate keeps track of the updated references by a given row update +func (rt *referenceTracker) processRowUpdate(table, uuid string, row *ovsdb.RowUpdate2) error { + + // getReferencesFromRowModify extracts updated references from the + // modifications. Following the same strategy as the modify field of Update2 + // notification, it will extract a difference, that is, both old removed + // references and new added references are extracted. This difference will + // then be applied to currently tracked references to come up with the + // updated references. 
+ + // For more info on the modify field of Update2 notification and the + // strategy used to apply differences, check + // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification + + var updateRefs database.References + switch { + case row.Delete != nil: + rt.deleted[uuid] = table + updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Old, row.Old) + case row.Modify != nil: + updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Modify, row.Old) + case row.Insert != nil: + if !isRoot(&rt.dbModel, table) { + // track rows added that are not part of the root set, we might need + // to delete those later + rt.added[uuid] = table + rt.tracked[uuid] = table + } + updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Insert, nil) + } + + // (lazy) initialize existing references to the same rows from the database + for spec, refs := range updateRefs { + for to := range refs { + err := rt.initReferences(spec.ToTable, to) + if err != nil { + return err + } + } + } + + // apply the reference modifications to the initialized references + applyReferenceModifications(rt.references, updateRefs) + + return nil +} + +// processStrongReferences adds delete operations for rows that are not part of +// the root set and are no longer strongly referenced. Returns a referential +// integrity violation if a nonexistent row is strongly referenced or a strongly +// referenced row has been deleted. 
+func (rt *referenceTracker) processStrongReferences() (ModelUpdates, error) { + // make sure that we are tracking the references to the deleted rows + err := rt.initReferencesOfDeletedRows() + if err != nil { + return ModelUpdates{}, err + } + + // track if rows are referenced or not + isReferenced := map[string]bool{} + + // go over the updated references + for spec, refs := range rt.references { + + // we only care about strong references + if !isStrong(&rt.dbModel, spec) { + continue + } + + for to, from := range refs { + // check if the referenced row exists + exists, err := rt.rowExists(spec.ToTable, to) + if err != nil { + return ModelUpdates{}, err + } + if !exists { + for _, uuid := range from { + // strong reference to a row that does not exist + return ModelUpdates{}, ovsdb.NewReferentialIntegrityViolation(fmt.Sprintf( + "Table %s column %s row %s references nonexistent or deleted row %s in table %s", + spec.FromTable, spec.FromColumn, uuid, to, spec.ToTable)) + } + // we deleted the row ourselves on a previous loop + continue + } + + // track if this row is referenced from this location spec + isReferenced[to] = isReferenced[to] || len(from) > 0 + } + } + + // inserted rows that are unreferenced and not part of the root set will + // silently be dropped from the updates + for uuid := range rt.added { + if isReferenced[uuid] { + continue + } + isReferenced[uuid] = false + } + + // delete rows that are not referenced + updates := ModelUpdates{} + for uuid, isReferenced := range isReferenced { + if isReferenced { + // row is still referenced, ignore + continue + } + + if rt.deleted[uuid] != "" { + // already deleted, ignore + continue + } + + table := rt.tracked[uuid] + if isRoot(&rt.dbModel, table) { + // table is part of the root set, ignore + continue + } + + // delete row that is not part of the root set and is no longer + // referenced + update, err := rt.deleteRow(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + err = 
updates.Merge(rt.dbModel, update) + if err != nil { + return ModelUpdates{}, err + } + } + + return updates, nil +} + +// processWeakReferences deletes weak references to rows that were deleted. +// Returns a constraint violation if this results in invalid values +func (rt *referenceTracker) processWeakReferences() (ModelUpdates, error) { + // make sure that we are tracking the references to rows that might have + // been deleted as a result of strong reference garbage collection + err := rt.initReferencesOfDeletedRows() + if err != nil { + return ModelUpdates{}, err + } + + tables := map[string]string{} + originalRows := map[string]ovsdb.Row{} + updatedRows := map[string]ovsdb.Row{} + + for spec, refs := range rt.references { + // fetch some reference information from the schema + extendedType, minLenAllowed, refType, _ := refInfo(&rt.dbModel, spec.FromTable, spec.FromColumn, spec.FromValue) + isEmptyAllowed := minLenAllowed == 0 + + if refType != ovsdb.Weak { + // we only care about weak references + continue + } + + for to, from := range refs { + if len(from) == 0 { + // not referenced from anywhere, ignore + continue + } + + // check if the referenced row exists + exists, err := rt.rowExists(spec.ToTable, to) + if err != nil { + return ModelUpdates{}, err + } + if exists { + // we only care about rows that have been deleted or otherwise + // don't exist + continue + } + + // generate the updates to remove the references to deleted rows + for _, uuid := range from { + if _, ok := updatedRows[uuid]; !ok { + updatedRows[uuid] = ovsdb.NewRow() + } + + if rt.deleted[uuid] != "" { + // already deleted, ignore + continue + } + + // fetch the original rows + if originalRows[uuid] == nil { + originalRow, err := rt.getRow(spec.FromTable, uuid) + if err != nil { + return ModelUpdates{}, err + } + if originalRow == nil { + return ModelUpdates{}, fmt.Errorf("reference from non-existent model with uuid %s", uuid) + } + originalRows[uuid] = *originalRow + } + + var becomesLen 
int + switch extendedType { + case ovsdb.TypeMap: + // a map referencing the row + // generate the mutation to remove the entry form the map + originalMap := originalRows[uuid][spec.FromColumn].(ovsdb.OvsMap).GoMap + var mutationMap map[any]any + value, ok := updatedRows[uuid][spec.FromColumn] + if !ok { + mutationMap = map[any]any{} + } else { + mutationMap = value.(ovsdb.OvsMap).GoMap + } + // copy the map entries referencing the row from the original map + mutationMap = copyMapKeyValues(originalMap, mutationMap, !spec.FromValue, ovsdb.UUID{GoUUID: to}) + + // track the new length of the map + if !isEmptyAllowed { + becomesLen = len(originalMap) - len(mutationMap) + } + + updatedRows[uuid][spec.FromColumn] = ovsdb.OvsMap{GoMap: mutationMap} + + case ovsdb.TypeSet: + // a set referencing the row + // generate the mutation to remove the entry form the set + var mutationSet []any + value, ok := updatedRows[uuid][spec.FromColumn] + if !ok { + mutationSet = []any{} + } else { + mutationSet = value.(ovsdb.OvsSet).GoSet + } + mutationSet = append(mutationSet, ovsdb.UUID{GoUUID: to}) + + // track the new length of the set + if !isEmptyAllowed { + originalSet := originalRows[uuid][spec.FromColumn].(ovsdb.OvsSet).GoSet + becomesLen = len(originalSet) - len(mutationSet) + } + + updatedRows[uuid][spec.FromColumn] = ovsdb.OvsSet{GoSet: mutationSet} + + case ovsdb.TypeUUID: + // this is an atomic UUID value that needs to be cleared + updatedRows[uuid][spec.FromColumn] = nil + becomesLen = 0 + } + + if becomesLen < minLenAllowed { + return ModelUpdates{}, ovsdb.NewConstraintViolation(fmt.Sprintf( + "Deletion of a weak reference to a deleted (or never-existing) row from column %s in table %s "+ + "row %s caused this column to have an invalid length.", + spec.FromColumn, spec.FromTable, uuid)) + } + + // track the table of the row we are going to update + tables[uuid] = spec.FromTable + } + } + } + + // process the updates + updates := ModelUpdates{} + for uuid, rowUpdate := 
range updatedRows { + update, err := rt.updateRow(tables[uuid], uuid, rowUpdate) + if err != nil { + return ModelUpdates{}, err + } + err = updates.Merge(rt.dbModel, update) + if err != nil { + return ModelUpdates{}, err + } + } + + return updates, nil +} + +func copyMapKeyValues(from, to map[any]any, isKey bool, keyValue ovsdb.UUID) map[any]any { + if isKey { + to[keyValue] = from[keyValue] + return to + } + for key, value := range from { + if value.(ovsdb.UUID) == keyValue { + to[key] = from[key] + } + } + return to +} + +// initReferences initializes the references to the provided row from the +// database +func (rt *referenceTracker) initReferences(table, uuid string) error { + if _, ok := rt.tracked[uuid]; ok { + // already initialized + return nil + } + existingRefs, err := rt.provider.GetReferences(rt.dbModel.Client().Name(), table, uuid) + if err != nil { + return err + } + rt.references.UpdateReferences(existingRefs) + rt.tracked[uuid] = table + return nil +} + +func (rt *referenceTracker) initReferencesOfDeletedRows() error { + for uuid, table := range rt.deleted { + err := rt.initReferences(table, uuid) + if err != nil { + return err + } + } + return nil +} + +// deleteRow adds an update to delete the provided row. 
+func (rt *referenceTracker) deleteRow(table, uuid string) (ModelUpdates, error) { + model, err := rt.getModel(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + row, err := rt.getRow(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + + updates := ModelUpdates{} + update := ovsdb.RowUpdate2{Delete: &ovsdb.Row{}, Old: row} + err = updates.AddRowUpdate2(rt.dbModel, table, uuid, model, update) + + rt.deleted[uuid] = table + + return updates, err +} + +// updateRow generates updates for the provided row +func (rt *referenceTracker) updateRow(table, uuid string, row ovsdb.Row) (ModelUpdates, error) { + model, err := rt.getModel(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + + // In agreement with processWeakReferences, columns with values are assumed + // to be values of sets or maps that need to be mutated for deletion. + // Columns with no values are assumed to be atomic optional values that need + // to be cleared with an update. + + mutations := make([]ovsdb.Mutation, 0, len(row)) + update := ovsdb.Row{} + for column, value := range row { + if value != nil { + mutations = append(mutations, *ovsdb.NewMutation(column, ovsdb.MutateOperationDelete, value)) + continue + } + update[column] = ovsdb.OvsSet{GoSet: []any{}} + } + + updates := ModelUpdates{} + + if len(mutations) > 0 { + err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{ + Op: ovsdb.OperationMutate, + Table: table, + Mutations: mutations, + Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})}, + }) + if err != nil { + return ModelUpdates{}, err + } + } + + if len(update) > 0 { + err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{ + Op: ovsdb.OperationUpdate, + Table: table, + Row: update, + Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})}, + }) + if err != nil { + return ModelUpdates{}, err + } + } + + return 
updates, nil +} + +// getModel gets the model from the updates or the database +func (rt *referenceTracker) getModel(table, uuid string) (model.Model, error) { + if _, deleted := rt.deleted[uuid]; deleted { + // model has been deleted + return nil, nil + } + // look for the model in the updates + model := rt.updates.GetModel(table, uuid) + if model != nil { + return model, nil + } + // look for the model in the database + model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid) + if err != nil { + return nil, err + } + return model, nil +} + +// getRow gets the row from the updates or the database +func (rt *referenceTracker) getRow(table, uuid string) (*ovsdb.Row, error) { + if _, deleted := rt.deleted[uuid]; deleted { + // row has been deleted + return nil, nil + } + // look for the row in the updates + row := rt.updates.GetRow(table, uuid) + if row != nil { + return row, nil + } + // look for the model in the database and build the row + model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid) + if err != nil { + return nil, err + } + info, err := rt.dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + newRow, err := rt.dbModel.Mapper.NewRow(info) + if err != nil { + return nil, err + } + return &newRow, nil +} + +// rowExists returns whether the row exists either in the updates or the database +func (rt *referenceTracker) rowExists(table, uuid string) (bool, error) { + model, err := rt.getModel(table, uuid) + return model != nil, err +} + +func getReferenceModificationsFromRow(dbModel *model.DatabaseModel, table, uuid string, modify, old *ovsdb.Row) database.References { + refs := database.References{} + for column, value := range *modify { + var oldValue any + if old != nil { + oldValue = (*old)[column] + } + crefs := getReferenceModificationsFromColumn(dbModel, table, uuid, column, value, oldValue) + refs.UpdateReferences(crefs) + } + return refs +} + +func getReferenceModificationsFromColumn(dbModel 
*model.DatabaseModel, table, uuid, column string, modify, old any) database.References { + switch v := modify.(type) { + case ovsdb.UUID: + var oldUUID ovsdb.UUID + if old != nil { + oldUUID = old.(ovsdb.UUID) + } + return getReferenceModificationsFromAtom(dbModel, table, uuid, column, v, oldUUID) + case ovsdb.OvsSet: + var oldSet ovsdb.OvsSet + if old != nil { + oldSet = old.(ovsdb.OvsSet) + } + return getReferenceModificationsFromSet(dbModel, table, uuid, column, v, oldSet) + case ovsdb.OvsMap: + return getReferenceModificationsFromMap(dbModel, table, uuid, column, v) + } + return nil +} + +func getReferenceModificationsFromMap(dbModel *model.DatabaseModel, table, uuid, column string, value ovsdb.OvsMap) database.References { + if len(value.GoMap) == 0 { + return nil + } + + // get the referenced table + keyRefTable := refTable(dbModel, table, column, false) + valueRefTable := refTable(dbModel, table, column, true) + if keyRefTable == "" && valueRefTable == "" { + return nil + } + + from := uuid + keySpec := database.ReferenceSpec{ToTable: keyRefTable, FromTable: table, FromColumn: column, FromValue: false} + valueSpec := database.ReferenceSpec{ToTable: valueRefTable, FromTable: table, FromColumn: column, FromValue: true} + + refs := database.References{} + for k, v := range value.GoMap { + if keyRefTable != "" { + switch to := k.(type) { + case ovsdb.UUID: + if _, ok := refs[keySpec]; !ok { + refs[keySpec] = database.Reference{to.GoUUID: []string{from}} + } else if _, ok := refs[keySpec][to.GoUUID]; !ok { + refs[keySpec][to.GoUUID] = append(refs[keySpec][to.GoUUID], from) + } + } + } + if valueRefTable != "" { + switch to := v.(type) { + case ovsdb.UUID: + if _, ok := refs[valueSpec]; !ok { + refs[valueSpec] = database.Reference{to.GoUUID: []string{from}} + } else if _, ok := refs[valueSpec][to.GoUUID]; !ok { + refs[valueSpec][to.GoUUID] = append(refs[valueSpec][to.GoUUID], from) + } + } + } + } + + return refs +} + +func getReferenceModificationsFromSet(dbModel 
*model.DatabaseModel, table, uuid, column string, modify, old ovsdb.OvsSet) database.References { + // if the modify set is empty, it means the op is clearing an atomic value + // so pick the old value instead + value := modify + if len(modify.GoSet) == 0 { + value = old + } + + if len(value.GoSet) == 0 { + return nil + } + + // get the referenced table + refTable := refTable(dbModel, table, column, false) + if refTable == "" { + return nil + } + + spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column} + from := uuid + refs := database.References{spec: database.Reference{}} + for _, v := range value.GoSet { + switch to := v.(type) { + case ovsdb.UUID: + refs[spec][to.GoUUID] = append(refs[spec][to.GoUUID], from) + } + } + return refs +} + +func getReferenceModificationsFromAtom(dbModel *model.DatabaseModel, table, uuid, column string, modify, old ovsdb.UUID) database.References { + // get the referenced table + refTable := refTable(dbModel, table, column, false) + if refTable == "" { + return nil + } + spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column} + from := uuid + to := modify.GoUUID + refs := database.References{spec: {to: {from}}} + if old.GoUUID != "" { + // extract the old value as well + refs[spec][old.GoUUID] = []string{from} + } + return refs +} + +// applyReferenceModifications updates references in 'a' from those in 'b' +func applyReferenceModifications(a, b database.References) { + for spec, bv := range b { + for to, bfrom := range bv { + if av, ok := a[spec]; ok { + if afrom, ok := av[to]; ok { + r, _ := applyDifference(afrom, bfrom) + av[to] = r.([]string) + } else { + // this reference is not in 'a', so add it + av[to] = bfrom + } + } else { + // this reference is not in 'a', so add it + a[spec] = database.Reference{to: bfrom} + } + } + } +} + +func refInfo(dbModel *model.DatabaseModel, table, column string, mapValue bool) (ovsdb.ExtendedType, int, ovsdb.RefType, string) { + 
tSchema := dbModel.Schema.Table(table) + if tSchema == nil { + panic(fmt.Sprintf("unexpected schema error: no schema for table %s", table)) + } + + cSchema := tSchema.Column(column) + if cSchema == nil { + panic(fmt.Sprintf("unexpected schema error: no schema for column %s", column)) + } + + cType := cSchema.TypeObj + if cType == nil { + // this is not a reference + return "", 0, "", "" + } + + var bType *ovsdb.BaseType + switch { + case !mapValue && cType.Key != nil: + bType = cType.Key + case mapValue && cType.Value != nil: + bType = cType.Value + default: + panic(fmt.Sprintf("unexpected schema error: no schema for map value on column %s", column)) + } + if bType.Type != ovsdb.TypeUUID { + // this is not a reference + return "", 0, "", "" + } + + // treat optional values represented with sets as atomic UUIDs + extendedType := cSchema.Type + if extendedType == ovsdb.TypeSet && cType.Min() == 0 && cType.Max() == 1 { + extendedType = ovsdb.TypeUUID + } + + rType, err := bType.RefType() + if err != nil { + panic(fmt.Sprintf("unexpected schema error: %v", err)) + } + + rTable, err := bType.RefTable() + if err != nil { + panic(fmt.Sprintf("unexpected schema error: %v", err)) + } + + return extendedType, cType.Min(), rType, rTable +} + +func refTable(dbModel *model.DatabaseModel, table, column string, mapValue bool) ovsdb.RefType { + _, _, _, refTable := refInfo(dbModel, table, column, mapValue) + return refTable +} + +func isRoot(dbModel *model.DatabaseModel, table string) bool { + isRoot, err := dbModel.Schema.IsRoot(table) + if err != nil { + panic(fmt.Sprintf("unexpected schema error: %v", err)) + } + return isRoot +} + +func isStrong(dbModel *model.DatabaseModel, spec database.ReferenceSpec) bool { + _, _, refType, _ := refInfo(dbModel, spec.FromTable, spec.FromColumn, spec.FromValue) + return refType == ovsdb.Strong +} diff --git a/vendor/github.com/ovn-kubernetes/libovsdb/updates/updates.go b/vendor/github.com/ovn-kubernetes/libovsdb/updates/updates.go new file 
mode 100644 index 0000000000..89a9a6cb6f --- /dev/null +++ b/vendor/github.com/ovn-kubernetes/libovsdb/updates/updates.go @@ -0,0 +1,528 @@ +package updates + +import ( + "fmt" + "reflect" + + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" +) + +type rowUpdate2 = ovsdb.RowUpdate2 + +// modelUpdate contains an update in model and OVSDB RowUpdate2 notation +type modelUpdate struct { + rowUpdate2 *rowUpdate2 + old model.Model + new model.Model +} + +// isEmpty returns whether this update is empty +func (mu modelUpdate) isEmpty() bool { + return mu == modelUpdate{} +} + +// ModelUpdates contains updates indexed by table and uuid +type ModelUpdates struct { + updates map[string]map[string]modelUpdate +} + +// GetUpdatedTables returns the tables that have updates +func (u ModelUpdates) GetUpdatedTables() []string { + tables := make([]string, 0, len(u.updates)) + for table, updates := range u.updates { + if len(updates) > 0 { + tables = append(tables, table) + } + } + return tables +} + +// ForEachModelUpdate processes each row update of a given table in model +// notation +func (u ModelUpdates) ForEachModelUpdate(table string, do func(uuid string, old, newModel model.Model) error) error { + models := u.updates[table] + for uuid, model := range models { + err := do(uuid, model.old, model.new) + if err != nil { + return err + } + } + return nil +} + +// ForEachRowUpdate processes each row update of a given table in OVSDB +// RowUpdate2 notation +func (u ModelUpdates) ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error { + rows := u.updates[table] + for uuid, row := range rows { + err := do(uuid, *row.rowUpdate2) + if err != nil { + return err + } + } + return nil +} + +// GetModel returns the last known state of the requested model. If the model is +// unknown or has been deleted, returns nil. 
+func (u ModelUpdates) GetModel(table, uuid string) model.Model { + if u.updates == nil { + return nil + } + if t, found := u.updates[table]; found { + if update, found := t[uuid]; found { + return update.new + } + } + return nil +} + +// GetRow returns the last known state of the requested row. If the row is +// unknown or has been deleted, returns nil. +func (u ModelUpdates) GetRow(table, uuid string) *ovsdb.Row { + if u.updates == nil { + return nil + } + if t, found := u.updates[table]; found { + if update, found := t[uuid]; found { + return update.rowUpdate2.New + } + } + return nil +} + +// Merge a set of updates with an earlier set of updates +func (u *ModelUpdates) Merge(dbModel model.DatabaseModel, newModel ModelUpdates) error { + for table, models := range newModel.updates { + for uuid, update := range models { + err := u.addUpdate(dbModel, table, uuid, update) + if err != nil { + return err + } + } + } + return nil +} + +// AddOperation adds an update for a model from a OVSDB Operation. If several +// updates for the same model are aggregated, the user is responsible that the +// provided model to be updated matches the updated model of the previous +// update. +func (u *ModelUpdates) AddOperation(dbModel model.DatabaseModel, table, uuid string, current model.Model, op *ovsdb.Operation) error { + switch op.Op { + case ovsdb.OperationInsert: + return u.addInsertOperation(dbModel, table, uuid, op) + case ovsdb.OperationUpdate: + return u.addUpdateOperation(dbModel, table, uuid, current, op) + case ovsdb.OperationMutate: + return u.addMutateOperation(dbModel, table, uuid, current, op) + case ovsdb.OperationDelete: + return u.addDeleteOperation(dbModel, table, uuid, current, op) + default: + return fmt.Errorf("database update from operation %#v not supported", op.Op) + } +} + +// AddRowUpdate adds an update for a model from a OVSDB RowUpdate. 
If several +// updates for the same model are aggregated, the user is responsible that the +// provided model to be updated matches the updated model of the previous +// update. +func (u *ModelUpdates) AddRowUpdate(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru ovsdb.RowUpdate) error { + switch { + case ru.Old == nil && ru.New != nil: + newModel, err := model.CreateModel(dbModel, table, ru.New, uuid) + if err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: newModel, rowUpdate2: &rowUpdate2{New: ru.New}}) + if err != nil { + return err + } + case ru.Old != nil && ru.New != nil: + old := current + newModel := model.Clone(current) + info, err := dbModel.NewModelInfo(newModel) + if err != nil { + return err + } + changed, err := updateModel(dbModel, table, info, ru.New, nil) + if !changed || err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: newModel, rowUpdate2: &rowUpdate2{Old: ru.Old, New: ru.New}}) + if err != nil { + return err + } + case ru.New == nil: + old := current + err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &rowUpdate2{Old: ru.Old}}) + if err != nil { + return err + } + } + return nil +} + +// AddRowUpdate2 adds an update for a model from a OVSDB RowUpdate2. If several +// updates for the same model are aggregated, the user is responsible that the +// provided model to be updated matches the updated model of the previous +// update. 
+func (u *ModelUpdates) AddRowUpdate2(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru2 ovsdb.RowUpdate2) error { + switch { + case ru2.Initial != nil: + ru2.Insert = ru2.Initial + fallthrough + case ru2.Insert != nil: + newModel, err := model.CreateModel(dbModel, table, ru2.Insert, uuid) + if err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: newModel, rowUpdate2: &ru2}) + if err != nil { + return err + } + case ru2.Modify != nil: + old := current + newModel := model.Clone(current) + info, err := dbModel.NewModelInfo(newModel) + if err != nil { + return err + } + changed, err := modifyModel(dbModel, table, info, ru2.Modify) + if !changed || err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: newModel, rowUpdate2: &ru2}) + if err != nil { + return err + } + default: + old := current + err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &ru2}) + if err != nil { + return err + } + } + return nil +} + +func (u *ModelUpdates) addUpdate(dbModel model.DatabaseModel, table, uuid string, update modelUpdate) error { + if u.updates == nil { + u.updates = map[string]map[string]modelUpdate{} + } + if _, ok := u.updates[table]; !ok { + u.updates[table] = make(map[string]modelUpdate) + } + + ts := dbModel.Schema.Table(table) + update, err := merge(ts, u.updates[table][uuid], update) + if err != nil { + return err + } + + if !update.isEmpty() { + u.updates[table][uuid] = update + return nil + } + + // If after the merge this amounts to no update, remove it from the list and + // clean up + delete(u.updates[table], uuid) + if len(u.updates[table]) == 0 { + delete(u.updates, table) + } + if len(u.updates) == 0 { + u.updates = nil + } + + return nil +} + +func (u *ModelUpdates) addInsertOperation(dbModel model.DatabaseModel, table, uuid string, op *ovsdb.Operation) error { + m := dbModel.Mapper + + model, err := dbModel.NewModel(table) + if err != nil { 
+ return err + } + + mapperInfo, err := dbModel.NewModelInfo(model) + if err != nil { + return err + } + + err = m.GetRowData(&op.Row, mapperInfo) + if err != nil { + return err + } + + err = mapperInfo.SetField("_uuid", uuid) + if err != nil { + return err + } + + resultRow, err := m.NewRow(mapperInfo) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: nil, + new: model, + rowUpdate2: &rowUpdate2{ + Insert: &resultRow, + New: &resultRow, + Old: nil, + }, + }, + ) + + return err +} + +func (u *ModelUpdates) addUpdateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error { + m := dbModel.Mapper + + oldInfo, err := dbModel.NewModelInfo(old) + if err != nil { + return err + } + + oldRow, err := m.NewRow(oldInfo) + if err != nil { + return err + } + + newModel := model.Clone(old) + newInfo, err := dbModel.NewModelInfo(newModel) + if err != nil { + return err + } + + delta := ovsdb.NewRow() + changed, err := updateModel(dbModel, table, newInfo, &op.Row, &delta) + if err != nil { + return err + } + if !changed { + return nil + } + + newRow, err := m.NewRow(newInfo) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: old, + new: newModel, + rowUpdate2: &rowUpdate2{ + Modify: &delta, + Old: &oldRow, + New: &newRow, + }, + }, + ) + + return err +} + +func (u *ModelUpdates) addMutateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error { + m := dbModel.Mapper + schema := dbModel.Schema.Table(table) + + oldInfo, err := dbModel.NewModelInfo(old) + if err != nil { + return err + } + + oldRow, err := m.NewRow(oldInfo) + if err != nil { + return err + } + + newModel := model.Clone(old) + newInfo, err := dbModel.NewModelInfo(newModel) + if err != nil { + return err + } + + differences := make(map[string]any) + for _, mutation := range op.Mutations { + column := 
schema.Column(mutation.Column) + if column == nil { + continue + } + + var nativeValue any + // Usually a mutation value is of the same type of the value being mutated + // except for delete mutation of maps where it can also be a list of same type of + // keys (rfc7047 5.1). Handle this special case here. + if mutation.Mutator == "delete" && column.Type == ovsdb.TypeMap && reflect.TypeOf(mutation.Value) != reflect.TypeOf(ovsdb.OvsMap{}) { + nativeValue, err = ovsdb.OvsToNativeSlice(column.TypeObj.Key.Type, mutation.Value) + if err != nil { + return err + } + } else { + nativeValue, err = ovsdb.OvsToNative(column, mutation.Value) + if err != nil { + return err + } + } + + if err := ovsdb.ValidateMutation(column, mutation.Mutator, nativeValue); err != nil { + return err + } + + current, err := newInfo.FieldByColumn(mutation.Column) + if err != nil { + return err + } + + newValue, diff := mutate(current, mutation.Mutator, nativeValue) + if err := newInfo.SetField(mutation.Column, newValue); err != nil { + return err + } + + old, err := oldInfo.FieldByColumn(mutation.Column) + if err != nil { + return err + } + diff, changed := mergeDifference(old, differences[mutation.Column], diff) + if changed { + differences[mutation.Column] = diff + } else { + delete(differences, mutation.Column) + } + } + + if len(differences) == 0 { + return nil + } + + delta := ovsdb.NewRow() + for column, diff := range differences { + colSchema := schema.Column(column) + diffOvs, err := ovsdb.NativeToOvs(colSchema, diff) + if err != nil { + return err + } + delta[column] = diffOvs + } + + newRow, err := m.NewRow(newInfo) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: old, + new: newModel, + rowUpdate2: &rowUpdate2{ + Modify: &delta, + Old: &oldRow, + New: &newRow, + }, + }, + ) + + return err +} + +func (u *ModelUpdates) addDeleteOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, _ *ovsdb.Operation) error { + m := 
dbModel.Mapper + + info, err := dbModel.NewModelInfo(old) + if err != nil { + return err + } + + oldRow, err := m.NewRow(info) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: old, + new: nil, + rowUpdate2: &rowUpdate2{ + Delete: &ovsdb.Row{}, + Old: &oldRow, + }, + }, + ) + + return err +} + +func updateModel(dbModel model.DatabaseModel, table string, info *mapper.Info, update, modify *ovsdb.Row) (bool, error) { + return updateOrModifyModel(dbModel, table, info, update, modify, false) +} + +func modifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, modify *ovsdb.Row) (bool, error) { + return updateOrModifyModel(dbModel, table, info, modify, nil, true) +} + +// updateOrModifyModel updates info about a model with a given row containing +// the change. The change row itself can be interpreted as an update or a +// modify. If the change is an update and a modify row is provided, it will be +// filled with the modify data. 
+func updateOrModifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, changeRow, modifyRow *ovsdb.Row, isModify bool) (bool, error) { + schema := dbModel.Schema.Table(table) + var changed bool + + for column, updateOvs := range *changeRow { + colSchema := schema.Column(column) + if colSchema == nil { + // ignore columns we don't know about in our schema + continue + } + + currentNative, err := info.FieldByColumn(column) + if err != nil { + return false, err + } + + updateNative, err := ovsdb.OvsToNative(colSchema, updateOvs) + if err != nil { + return false, err + } + + if isModify { + differenceNative, isDifferent := applyDifference(currentNative, updateNative) + if isDifferent && !colSchema.Mutable() { + return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table)) + } + changed = changed || isDifferent + err = info.SetField(column, differenceNative) + if err != nil { + return false, err + } + } else { + differenceNative, isDifferent := difference(currentNative, updateNative) + if isDifferent && !colSchema.Mutable() { + return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table)) + } + changed = changed || isDifferent + if isDifferent && modifyRow != nil { + deltaOvs, err := ovsdb.NativeToOvs(colSchema, differenceNative) + if err != nil { + return false, err + } + (*modifyRow)[column] = deltaOvs + } + err = info.SetField(column, updateNative) + if err != nil { + return false, err + } + } + } + + return changed, nil +} diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go new file mode 100644 index 0000000000..a51269d91a --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -0,0 +1,95 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package sha3 implements the SHA-3 hash algorithms and the SHAKE extendable +// output functions defined in FIPS 202. +// +// Most of this package is a wrapper around the crypto/sha3 package in the +// standard library. The only exception is the legacy Keccak hash functions. +package sha3 + +import ( + "crypto/sha3" + "hash" +) + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +// +// It is a wrapper for the [sha3.New224] function in the standard library. +// +//go:fix inline +func New224() hash.Hash { + return sha3.New224() +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +// +// It is a wrapper for the [sha3.New256] function in the standard library. +// +//go:fix inline +func New256() hash.Hash { + return sha3.New256() +} + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +// +// It is a wrapper for the [sha3.New384] function in the standard library. +// +//go:fix inline +func New384() hash.Hash { + return sha3.New384() +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +// +// It is a wrapper for the [sha3.New512] function in the standard library. +// +//go:fix inline +func New512() hash.Hash { + return sha3.New512() +} + +// Sum224 returns the SHA3-224 digest of the data. +// +// It is a wrapper for the [sha3.Sum224] function in the standard library. +// +//go:fix inline +func Sum224(data []byte) [28]byte { + return sha3.Sum224(data) +} + +// Sum256 returns the SHA3-256 digest of the data. +// +// It is a wrapper for the [sha3.Sum256] function in the standard library. 
+// +//go:fix inline +func Sum256(data []byte) [32]byte { + return sha3.Sum256(data) +} + +// Sum384 returns the SHA3-384 digest of the data. +// +// It is a wrapper for the [sha3.Sum384] function in the standard library. +// +//go:fix inline +func Sum384(data []byte) [48]byte { + return sha3.Sum384(data) +} + +// Sum512 returns the SHA3-512 digest of the data. +// +// It is a wrapper for the [sha3.Sum512] function in the standard library. +// +//go:fix inline +func Sum512(data []byte) [64]byte { + return sha3.Sum512(data) +} diff --git a/vendor/golang.org/x/crypto/sha3/legacy_hash.go b/vendor/golang.org/x/crypto/sha3/legacy_hash.go new file mode 100644 index 0000000000..b8784536e0 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/legacy_hash.go @@ -0,0 +1,263 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This implementation is only used for NewLegacyKeccak256 and +// NewLegacyKeccak512, which are not implemented by crypto/sha3. +// All other functions in this package are wrappers around crypto/sha3. + +import ( + "crypto/subtle" + "encoding/binary" + "errors" + "hash" + "unsafe" + + "golang.org/x/sys/cpu" +) + +const ( + dsbyteKeccak = 0b00000001 + + // rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in + // bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits. + rateK256 = (1600 - 256) / 8 + rateK512 = (1600 - 512) / 8 + rateK1024 = (1600 - 1024) / 8 +) + +// NewLegacyKeccak256 creates a new Keccak-256 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New256 instead. +func NewLegacyKeccak256() hash.Hash { + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak} +} + +// NewLegacyKeccak512 creates a new Keccak-512 hash. 
+// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New512 instead. +func NewLegacyKeccak512() hash.Hash { + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak} +} + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +type state struct { + a [1600 / 8]byte // main state of the hash + + // a[n:rate] is the buffer. If absorbing, it's the remaining space to XOR + // into before running the permutation. If squeezing, it's the remaining + // output to produce before running the permutation. + n, rate int + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing +} + +// BlockSize returns the rate of sponge underlying this hash function. 
+func (d *state) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *state) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the buffer indexes, and setting Sponge.state to absorbing. +func (d *state) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.n = 0 +} + +func (d *state) clone() *state { + ret := *d + return &ret +} + +// permute applies the KeccakF-1600 permutation. +func (d *state) permute() { + var a *[25]uint64 + if cpu.IsBigEndian { + a = new([25]uint64) + for i := range a { + a[i] = binary.LittleEndian.Uint64(d.a[i*8:]) + } + } else { + a = (*[25]uint64)(unsafe.Pointer(&d.a)) + } + + keccakF1600(a) + d.n = 0 + + if cpu.IsBigEndian { + for i := range a { + binary.LittleEndian.PutUint64(d.a[i*8:], a[i]) + } + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *state) padAndPermute() { + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in the sponge because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + d.a[d.n] ^= d.dsbyte + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. + d.a[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing +} + +// Write absorbs more data into the hash's state. It panics if any +// output has already been read. 
+func (d *state) Write(p []byte) (n int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + + n = len(p) + + for len(p) > 0 { + x := subtle.XORBytes(d.a[d.n:d.rate], d.a[d.n:d.rate], p) + d.n += x + p = p[x:] + + // If the sponge is full, apply the permutation. + if d.n == d.rate { + d.permute() + } + } + + return +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *state) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. + if d.state == spongeAbsorbing { + d.padAndPermute() + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + // Apply the permutation if we've squeezed the sponge dry. + if d.n == d.rate { + d.permute() + } + + x := copy(out, d.a[d.n:d.rate]) + d.n += x + out = out[x:] + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. It panics if any output has already been read. +func (d *state) Sum(in []byte) []byte { + if d.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation + dup.Read(hash) + return append(in, hash...) +} + +const ( + magicKeccak = "sha\x0b" + // magic || rate || main state || n || sponge direction + marshaledSize = len(magicKeccak) + 1 + 200 + 1 + 1 +) + +func (d *state) MarshalBinary() ([]byte, error) { + return d.AppendBinary(make([]byte, 0, marshaledSize)) +} + +func (d *state) AppendBinary(b []byte) ([]byte, error) { + switch d.dsbyte { + case dsbyteKeccak: + b = append(b, magicKeccak...) + default: + panic("unknown dsbyte") + } + // rate is at most 168, and n is at most rate. + b = append(b, byte(d.rate)) + b = append(b, d.a[:]...) 
+ b = append(b, byte(d.n), byte(d.state)) + return b, nil +} + +func (d *state) UnmarshalBinary(b []byte) error { + if len(b) != marshaledSize { + return errors.New("sha3: invalid hash state") + } + + magic := string(b[:len(magicKeccak)]) + b = b[len(magicKeccak):] + switch { + case magic == magicKeccak && d.dsbyte == dsbyteKeccak: + default: + return errors.New("sha3: invalid hash state identifier") + } + + rate := int(b[0]) + b = b[1:] + if rate != d.rate { + return errors.New("sha3: invalid hash state function") + } + + copy(d.a[:], b) + b = b[len(d.a):] + + n, state := int(b[0]), spongeDirection(b[1]) + if n > d.rate { + return errors.New("sha3: invalid hash state") + } + d.n = n + if state != spongeAbsorbing && state != spongeSqueezing { + return errors.New("sha3: invalid hash state") + } + d.state = state + + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/legacy_keccakf.go b/vendor/golang.org/x/crypto/sha3/legacy_keccakf.go new file mode 100644 index 0000000000..101588c16c --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/legacy_keccakf.go @@ -0,0 +1,416 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This implementation is only used for NewLegacyKeccak256 and +// NewLegacyKeccak512, which are not implemented by crypto/sha3. +// All other functions in this package are wrappers around crypto/sha3. + +import "math/bits" + +// rc stores the round constants for use in the ι step. 
+var rc = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} + +// keccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +func keccakF1600(a *[25]uint64) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + for i := 0; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. 
+ + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[12] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[18] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[24] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[16] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[22] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[3] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[1] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[7] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[19] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[11] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[23] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[4] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = 
a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[2] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[8] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[14] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[7] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[23] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[14] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[11] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[2] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[18] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[6] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[22] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[4] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[1] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ 
d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[8] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[24] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[12] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[3] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[19] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[22] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[8] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[19] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[1] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[12] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[23] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[16] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[2] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 
= bits.RotateLeft64(t, 25) + t = a[24] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[6] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[3] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[14] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[7] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[18] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[4] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[2] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[3] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[4] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[6] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[7] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[8] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = 
bits.RotateLeft64(t, 20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[11] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[12] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[14] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[16] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[18] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[19] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[22] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[23] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[24] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go new file mode 100644 index 0000000000..6f3f70c265 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +import ( + "crypto/sha3" + "hash" + "io" +) + +// ShakeHash defines the interface to hash functions that support +// arbitrary-length output. 
When used as a plain [hash.Hash], it +// produces minimum-length outputs that provide full-strength generic +// security. +type ShakeHash interface { + hash.Hash + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum.) + // It never returns an error, but subsequent calls to Write or Sum + // will panic. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +func NewShake128() ShakeHash { + return &shakeWrapper{sha3.NewSHAKE128(), 32, false, sha3.NewSHAKE128} +} + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() ShakeHash { + return &shakeWrapper{sha3.NewSHAKE256(), 64, false, sha3.NewSHAKE256} +} + +// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, +// a customizable variant of SHAKE128. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake128. +func NewCShake128(N, S []byte) ShakeHash { + return &shakeWrapper{sha3.NewCSHAKE128(N, S), 32, false, func() *sha3.SHAKE { + return sha3.NewCSHAKE128(N, S) + }} +} + +// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, +// a customizable variant of SHAKE256. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. 
S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake256. +func NewCShake256(N, S []byte) ShakeHash { + return &shakeWrapper{sha3.NewCSHAKE256(N, S), 64, false, func() *sha3.SHAKE { + return sha3.NewCSHAKE256(N, S) + }} +} + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + h.Write(data) + h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. +func ShakeSum256(hash, data []byte) { + h := NewShake256() + h.Write(data) + h.Read(hash) +} + +// shakeWrapper adds the Size, Sum, and Clone methods to a sha3.SHAKE +// to implement the ShakeHash interface. +type shakeWrapper struct { + *sha3.SHAKE + outputLen int + squeezing bool + newSHAKE func() *sha3.SHAKE +} + +func (w *shakeWrapper) Read(p []byte) (n int, err error) { + w.squeezing = true + return w.SHAKE.Read(p) +} + +func (w *shakeWrapper) Clone() ShakeHash { + s := w.newSHAKE() + b, err := w.MarshalBinary() + if err != nil { + panic(err) // unreachable + } + if err := s.UnmarshalBinary(b); err != nil { + panic(err) // unreachable + } + return &shakeWrapper{s, w.outputLen, w.squeezing, w.newSHAKE} +} + +func (w *shakeWrapper) Size() int { return w.outputLen } + +func (w *shakeWrapper) Sum(b []byte) []byte { + if w.squeezing { + panic("sha3: Sum after Read") + } + out := make([]byte, w.outputLen) + // Clone the state so that we don't affect future Write calls. + s := w.Clone() + s.Read(out) + return append(b, out...) 
+} diff --git a/vendor/modules.txt b/vendor/modules.txt index a8bfe87fce..8bfe9c9d9f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -50,9 +50,19 @@ github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 +# github.com/cenkalti/backoff/v4 v4.3.0 +## explicit; go 1.18 +github.com/cenkalti/backoff/v4 # github.com/cenkalti/backoff/v5 v5.0.3 ## explicit; go 1.23 github.com/cenkalti/backoff/v5 +# github.com/cenkalti/hub v1.0.2 +## explicit; go 1.20 +github.com/cenkalti/hub +# github.com/cenkalti/rpc2 v1.0.5 +## explicit; go 1.20 +github.com/cenkalti/rpc2 +github.com/cenkalti/rpc2/jsonrpc # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 @@ -155,6 +165,15 @@ github.com/fsnotify/fsnotify/internal # github.com/fxamacker/cbor/v2 v2.9.0 ## explicit; go 1.20 github.com/fxamacker/cbor/v2 +# github.com/gabriel-vasile/mimetype v1.4.10 +## explicit; go 1.21 +github.com/gabriel-vasile/mimetype +github.com/gabriel-vasile/mimetype/internal/charset +github.com/gabriel-vasile/mimetype/internal/csv +github.com/gabriel-vasile/mimetype/internal/json +github.com/gabriel-vasile/mimetype/internal/magic +github.com/gabriel-vasile/mimetype/internal/markup +github.com/gabriel-vasile/mimetype/internal/scan # github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 ## explicit; go 1.13 github.com/go-asn1-ber/asn1-ber @@ -227,6 +246,16 @@ github.com/go-openapi/swag/typeutils # github.com/go-openapi/swag/yamlutils v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag/yamlutils +# github.com/go-playground/locales v0.14.1 +## explicit; go 1.17 +github.com/go-playground/locales +github.com/go-playground/locales/currency +# github.com/go-playground/universal-translator v0.18.1 +## explicit; go 1.18 +github.com/go-playground/universal-translator +# github.com/go-playground/validator/v10 v10.28.0 +## explicit; go 1.24.0 +github.com/go-playground/validator/v10 # 
github.com/go-stack/stack v1.8.1 ## explicit; go 1.17 # github.com/godbus/dbus/v5 v5.1.0 @@ -370,6 +399,10 @@ github.com/karrick/godirwalk # github.com/kylelemons/godebug v1.1.0 ## explicit; go 1.11 github.com/kylelemons/godebug/diff +# github.com/leodido/go-urn v1.4.0 +## explicit; go 1.18 +github.com/leodido/go-urn +github.com/leodido/go-urn/scim/schema # github.com/libopenstorage/openstorage v1.0.0 ## explicit github.com/libopenstorage/openstorage/api @@ -770,6 +803,16 @@ github.com/openshift/route-controller-manager/pkg/route/ingress github.com/openshift/route-controller-manager/pkg/route/ingressip github.com/openshift/route-controller-manager/pkg/routecontroller github.com/openshift/route-controller-manager/pkg/version +# github.com/ovn-kubernetes/libovsdb v0.8.2-0.20260302130604-c07ce22366ac +## explicit; go 1.24.0 +github.com/ovn-kubernetes/libovsdb/cache +github.com/ovn-kubernetes/libovsdb/client +github.com/ovn-kubernetes/libovsdb/database +github.com/ovn-kubernetes/libovsdb/mapper +github.com/ovn-kubernetes/libovsdb/model +github.com/ovn-kubernetes/libovsdb/ovsdb +github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb +github.com/ovn-kubernetes/libovsdb/updates # github.com/peterbourgon/diskv v2.0.1+incompatible ## explicit github.com/peterbourgon/diskv @@ -994,6 +1037,7 @@ golang.org/x/crypto/md4 golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/pbkdf2 golang.org/x/crypto/salsa20/salsa +golang.org/x/crypto/sha3 # golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa ## explicit; go 1.25.0 golang.org/x/exp/slices @@ -3200,6 +3244,9 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client ## explicit; go 1.23 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json +# sigs.k8s.io/knftables v0.0.20 +## explicit; go 1.20 +sigs.k8s.io/knftables # sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 => github.com/openshift/kubernetes-kube-storage-version-migrator v0.0.3-0.20260304192652-72835e43c775 ## explicit; go 
1.24.0 sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1 diff --git a/vendor/sigs.k8s.io/knftables/.gitignore b/vendor/sigs.k8s.io/knftables/.gitignore new file mode 100644 index 0000000000..896d5783bc --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/.gitignore @@ -0,0 +1,2 @@ +*~ +hack/bin/golangci-lint diff --git a/vendor/sigs.k8s.io/knftables/CHANGELOG.md b/vendor/sigs.k8s.io/knftables/CHANGELOG.md new file mode 100644 index 0000000000..babd009a47 --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/CHANGELOG.md @@ -0,0 +1,225 @@ +# ChangeLog + +## v0.0.20 + +- `List()` has been changed to use `nft list table` rather than, e.g., + `nft list sets`, to ensure that it doesn't try to parse objects in + other tables (which may have been created by newer versions of `nft` + and might trigger crashes in older versions of `nft`; see + https://issues.k8s.io/136786). (`@danwinship` based on a previous PR + from `@kairosci`). + +- A new `ListAll()` method has been added to help work around the fact + that `List()` is now much less efficient with large tables. + (`@danwinship`). + +- `ListElements()` now correctly handles maps/sets with concatenated + keys/values including CIDR values. (`@danwinship`) + +## v0.0.19 + +- Added the ability to use a single `knftables.Interface` (and a + single `knftables.Transaction`) with multiple tables/families. To do + this, pass `""` for the family and table name to `knftables.New`, + and then manually fill in the `Table` and `Family` fields in all + `Object`s you create. (`@danwinship`) + +- Added `tx.Destroy()`, corresponding to `nft destroy`. Since `nft + destroy` requires a new-ish kernel (6.3) and CLI (1.0.8), there are + also two new `knftables.New()` options: `RequireDestroy` if you want + construction to fail on older systems, or `EmulateDestroy` if you + want knftables to try to emulate "destroy" on older systems, with + some limitations. See [README.md](./README.md#destroy-operations) + for more details. 
(`@danwinship`) + +- Added `Counter` objects and the `tx.Reset()` verb, to support + nftables counters. (`@aroradaman`) + +- Added `Table.Flags` and `Chain.Policy`. (Note that at this time the + "owner" and "persist" table flags can't usefully be used with + knftables, since knftables opens a new connection to the kernel for + each transaction and so the table would become un-owned immediately + after it was created.) (`@danwinship`) + +- Fixed `Fake.ParseDump()` to correctly parse rules with raw payload + expressions (`@danwinship`) and `flow add` rules (`hongliangl`). + +## v0.0.18 + +- Added locking to `Fake` to allow it to be safely used concurrently. + (`@npinaeva`) + +- Added a `Flowtable` object, and `Fake` support for correctly parsing + flowtable references. (`@aojea`) + +- Fixed a bug in `Fake.ParseDump`, which accidentally required the + table to have a comment. (`@danwinship`) + +## v0.0.17 + +- `ListRules()` now accepts `""` for the chain name, meaning to list + all rules in the table. (`@caseydavenport`) + +- `ListElements()` now handles elements with prefix/CIDR values (e.g., + `"192.168.0.0/16"`; these are represented specially in the JSON + format and the old code didn't handle them). (`@caseydavenport`) + +- Added `NumOperations()` to `Transaction` (which lets you figure out + belatedly whether you added anything to the transaction or not, and + could also be used for metrics). (`@fasaxc`) + +- `knftables.Interface` now reuses the same `bytes.Buffer` for each + call to `nft` rather than constructing a new one each time, saving + time and memory. (`@aroradaman`) + +- Fixed map element deletion in `knftables.Fake` to not mistakenly + require that you fill in the `.Value` of the element. (`@npinaeva`) + +- Added `Fake.LastTransaction`, to retrieve the most-recently-executed + transaction. (`@npinaeva`) + +## v0.0.16 + +- Fixed a bug in `Fake.ParseDump()` when using IPv6. 
(`@npinaeva`) + +## v0.0.15 + +- knftables now requires the nft binary to be v1.0.1 or later. This is + because earlier versions (a) had bugs that might cause them to crash + when parsing rules created by later versions of nft, and (b) always + parsed the entire ruleset at startup, even if you were only trying + to operate on a single table. The combination of those two factors + means that older versions of nft can't reliably be used from inside + a container. (`@danwinship`) + +- Fixed a bug that meant we were never setting comments on + tables/chains/sets/etc, even if nft and the kernel were both new + enough to support it. (`@tnqn`) + +- Added `Fake.ParseDump()`, to load a `Fake` from a `Fake.Dump()` + output. (`@npinaeva`) + +## v0.0.14 + +- Renamed the package `"sigs.k8s.io/knftables"`, reflecting its new + home at https://github.com/kubernetes-sigs/knftables/ + +- Improvements to `Fake`: + + - `Fake.Run()` is now properly transactional, and will have no + side effects if an error occurs. + + - `Fake.Dump()` now outputs all `add chain`, `add set`, and `add + table` commands before any `add rule` and `add element` + commands, to ensure that the dumped ruleset can be passed to + `nft -f` without errors. + + - Conversely, `Fake.Run()` now does enough parsing of rules and + elements that it will notice rules that do lookups in + non-existent sets/maps, and rules/verdicts that jump to + non-existent chains, so it can error out in those cases. + +- Added `nft.Check()`, which is like `nft.Run()`, but using + `nft --check`. + +- Fixed support for ingress and egress hooks (by adding + `Chain.Device`). + +## v0.0.13 + +- Fixed a bug in `Fake.Run` where it was not properly returning "not + found" / "already exists" errors. + +## v0.0.12 + +- Renamed the package from `"github.com/danwinship/nftables"` to + `"github.com/danwinship/knftables"`, for less ambiguity. + +- Added `NameLengthMax` and `CommentLengthMax` constants. 
+ +- Changed serialization of `Chain` to convert string-valued `Priority` + to numeric form, if possible. + +- (The `v0.0.11` tag exists but is not usable due to a bad `go.mod`) + +## v0.0.10 + +- Dropped `Define`, because nft defines turned out to not work the way + I thought (in particular, you can't do "$IP daddr"), so they end up + not really being useful for our purposes. + +- Made `NewTransaction` a method on `Interface` rather than a + top-level function. + +- Added `Transaction.String()`, for debugging + +- Fixed serialization of set/map elements with timeouts + +- Added special treament for `"@"` to `Concat` + +- Changed `nftables.New()` to return an `error` (doing the work that + used to be done by `nft.Present()`.) + +- Add autodetection for "object comment" support, and have + serialization just ignore comments on `Table`/`Chain`/`Set`/`Map` if + nft or the kernel does not support them. + +- Renamed `Optional()` to `PtrTo()` + +## v0.0.9 + +- Various tweaks to `Element`: + + - Changed `Key` and `Value` from `string` to `[]string` to better + support concatenated types (and dropped the `Join()` and + `Split()` helper functions that were previously used to join and + split concatenated values). + + - Split `Name` into separate `Set` and `Map` fields, which make it + clearer what is being named, and are more consistent with + `Rule.Chain`, and provide more redundancy for distinguishing set + elements from map elements. + + - Fixed serialization of map elements with a comments. + +- Rewrote `ListElements` and `ListRules` to use `nft -j`, for easier / + more reliable parsing. But this meant that `ListRules` no longer + returns the actual text of the rule. + +## v0.0.8 + +- Fixed `Fake.List` / `Fake.ListRules` / `Fake.ListElements` to return + errors that would be properly recognized by + `IsNotFound`/`IsAlreadyExists`. 
+ +## v0.0.7 + +- Implemented `tx.Create`, `tx.Insert`, `tx.Replace` + +- Replaced `tx.AddRule` with the `Concat` function + +## v0.0.6 + +- Added `IsNotFound` and `IsAlreadyExists` error-checking functions + +## v0.0.5 + +- Moved `Define` from `Transaction` to `Interface` + +## v0.0.3, v0.0.4 + +- Improvements to `Fake` to handle `Rule` and `Element` + deletion/overwrite. + +- Added `ListRules` and `ListElements` + +- (The `v0.0.3` and `v0.0.4` tags are identical.) + +## v0.0.2 + +- Made `Interface` be specific to a single family and table. (Before, + that was specified at the `Transaction` level.) + +## v0.0.1 + +- Initial "release" diff --git a/vendor/sigs.k8s.io/knftables/CONTRIBUTING.md b/vendor/sigs.k8s.io/knftables/CONTRIBUTING.md new file mode 100644 index 0000000000..668b546684 --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). 
Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) - Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](https://k8s.dev/guide) - Main contributor documentation, or you can just jump directly to the [contributing page](https://k8s.dev/docs/guide/contributing/) +- [Contributor Cheat Sheet](https://k8s.dev/cheatsheet) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://k8s.dev/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + +## Contact Information + +knftables is maintained by [Kubernetes SIG Network](https://github.com/kubernetes/community/tree/master/sig-network). + +- [sig-network slack channel](https://kubernetes.slack.com/messages/sig-network) +- [kubernetes-sig-network mailing list](https://groups.google.com/a/kubernetes.io/g/sig-network) diff --git a/vendor/sigs.k8s.io/knftables/LICENSE b/vendor/sigs.k8s.io/knftables/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/knftables/Makefile b/vendor/sigs.k8s.io/knftables/Makefile new file mode 100644 index 0000000000..981e6256a4 --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/Makefile @@ -0,0 +1,32 @@ +# Copyright 2023 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +all build: + echo "Usage:" + echo "make test - run unit tests" + echo "make update - run gofmt, etc" + echo "make verify - run golangci, etc" + +clean: + +test: + ./hack/test.sh + +update: + ./hack/update.sh + +verify: + ./hack/verify.sh + +.PHONY: all build clean test update verify diff --git a/vendor/sigs.k8s.io/knftables/OWNERS b/vendor/sigs.k8s.io/knftables/OWNERS new file mode 100644 index 0000000000..01baa62370 --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - aojea + - danwinship +approvers: + - danwinship diff --git a/vendor/sigs.k8s.io/knftables/README.md b/vendor/sigs.k8s.io/knftables/README.md new file mode 100644 index 0000000000..f531f21da4 --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/README.md @@ -0,0 +1,348 @@ +# knftables: a golang nftables library + +This is a library for using nftables from Go. + +It is not intended to support arbitrary use cases, but instead +specifically focuses on supporting Kubernetes components which are +using nftables in the way that nftables is supposed to be used (as +opposed to using nftables in a naively-translated-from-iptables way, +or using nftables to do totally valid things that aren't the sorts of +things Kubernetes components are likely to need to do; see the +"[iptables porting](./docs/iptables-porting.md)" doc for more thoughts +on porting old iptables-based components to nftables.) + +knftables is still under development and is not yet API stable. (See the +section on "Possible future changes" below.) + +The library is implemented as a wrapper around the `nft` CLI, because +the CLI API is the only well-documented interface to nftables. +Although it would be possible to use netlink directly (and some other +golang-based nftables libraries do this), that would result in an API +that is quite different from all documented examples of nftables usage +(e.g. 
the man pages and the [nftables wiki](http://wiki.nftables.org/)) +because there is no easy way to convert the "standard" representation +of nftables rules into the netlink form. + +(Actually, it's not quite true that there's no other usable API: the +`nft` CLI is just a thin wrapper around `libnftables`, and it would be +possible for knftables to use cgo to invoke that library instead of +using an external binary. However, this would be harder to build and +ship, so I'm not bothering with that for now. But this could be done +in the future without needing to change knftables's API.) + +knftables requires nft version 1.0.1 or later, because earlier +versions would download and process the entire ruleset regardless of +what you were doing, which, besides being pointlessly inefficient, +means that in some cases, other people using new features in _their_ +tables could prevent you from modifying _your_ table. (In particular, +a change in how some rules are generated starting in nft 1.0.3 +triggers a crash in nft 0.9.9 and earlier, _even if you aren't looking +at the table containing that rule_.) + +## Usage + +Create an `Interface` object to manage operations on a single nftables +table: + +```golang +nft, err := knftables.New(knftables.IPv4Family, "my-table") +if err != nil { + return fmt.Errorf("no nftables support: %v", err) +} +``` + +`knftables.New` also takes a comma-separated list of options after the +family and table name; see the documentation for that function for +more information. + +(If you want to operate on multiple tables or multiple nftables +families, you have two options: you can either create separate +`Interface` objects for each table, or you can create a single +`Interface` and pass `""` for the family and table. In that case, you +will need to explicitly fill in the `Family` and `Table` fields of +every `Chain`, `Rule`, etc, object you create.) + +You can use the various `List*` methods on the `Interface` to check if +objects exist. 
`ListAll` returns a map of the names of top-level +objects in the table, sorted by object type, while `List` returns just +the names of objects of a single type. `ListElements`, `ListRules`, +and `ListCounters` returned parsed objects of the given types. Note +that `ListRules` returns *partial* `Rule` objects; it does not fill in +the `Rule` field. + +```golang +allChains, err := nft.List(ctx, "chains") +if err != nil { + return fmt.Errorf("could not list chains: %v", err) +} +for chain := range sets.New(allChains...).Difference(expectedChains) { + tx.Delete(&knftables.Chain{Name: chain}) +} + +// ... + +elements, err := nft.ListElements(ctx, "map", "mymap") +if err != nil { + return fmt.Errorf("could not list map elements: %v", err) +} + +... +``` + +To make changes, create a `Transaction`, add the appropriate +operations to the transaction, and then call `nft.Run` on it: + +```golang +tx := nft.NewTransaction() + +tx.Add(&knftables.Chain{ + Name: "mychain", + Comment: knftables.PtrTo("this is my chain"), +}) +tx.Flush(&knftables.Chain{ + Name: "mychain", +}) + +var destIP net.IP +var destPort uint16 +... +tx.Add(&knftables.Rule{ + Chain: "mychain", + Rule: knftables.Concat( + "ip daddr", destIP, + "ip protocol", "tcp", + "th port", destPort, + "jump", destChain, + ) +}) + +err := nft.Run(context, tx) +``` + +If any operation in the transaction would fail, then `Run()` will +return an error and the entire transaction will be ignored. You can +use the `knftables.IsNotFound()` and `knftables.IsAlreadyExists()` +methods to check for those well-known error types. In a large +transaction, there is no supported way to determine exactly which +operation failed. + +(You can also pass a transaction to `nft.Check()`, which uses `nft +--check`, but otherwise behaves the same as `nft.Run()`.) + +## `knftables.Transaction` operations + +`knftables.Transaction` operations correspond to the top-level commands +in the `nft` binary. 
Currently-supported operations are: + +- `tx.Add()`: creates an object if it does not already exist, as with `nft add` +- `tx.Create()`: creates an object, which must not already exist, as with `nft create` +- `tx.Flush()`: flushes the contents of a table/chain/set/map, as with `nft flush` +- `tx.Reset()`: resets a counter, as with `nft reset` +- `tx.Delete()`: deletes an object, which must exist, as with `nft delete` +- `tx.Destroy()`: deletes an object if it exists, as with `nft destroy` + +For `Rule` objects the semantics and operations are slightly different: + +- `tx.Add()`: appends a rule to a chain or adds it after an existing rule, as with `nft add rule` +- `tx.Insert()`: prepends a rule to a chain or inserts it before another rule, as with `nft insert rule` +- `tx.Replace()`: replaces a rule, as with `nft replace rule` +- `tx.Delete()`/`tx.Destroy()`: deletes the rule with the given `Handle`, as with `nft delete rule`/`nft destroy rule` + +### `Destroy` operations + +Actually doing `nft destroy` requires a fairly new kernel (6.3 or +later) and `nft` binary (1.0.8 or later). Trying to run a transaction +containing a `Destroy` operation on an older host will result in an +error. + +There are two construct-time options to help out with this. First, you +can specify `RequireDestroy`, if you want knftables construction to +fail on older hosts: + +```golang +nft, err := knftables.New(knftables.IPv4Family, "my-table", knftables.RequireDestroy) +if err != nil { + ... +``` + +Alternatively, you can construct the `Interface` with the +`EmulateDestroy` option: + +```golang +nft, err := knftables.New(knftables.IPv4Family, "my-table", knftables.EmulateDestroy) +``` + +in which case knftables will attempt to emulate `nft destroy` if it is +not available by doing a combination of an `add` and a `delete` (where +the `add` will succeed whether the object previously existed or not, +and then the `delete` will succeed because the object definitely +exists at that point). 
To ensure that this emulation will work, if +`EmulateDestroy` is in effect then `tx.Destroy()` will require that +you pass it an object that is suitable for passing to both `tx.Add()` +and `tx.Delete()` (even if the system you are currently on supports +`nft destroy`). In particular, this means that when `EmulateDestroy` +is in effect: + + - You can only `Destroy()` objects by `Name` or `Key`, not by + `Handle`. + + - You can't `Destroy()` a `Rule` (since `Rule`s can only be deleted + by `Handle`). + + - If you include optional fields in the object (e.g. base chain + properties), they need to be correct (since an `Add()` would fail + if you passed different values). However, note that you *can* just + leave the optional fields unset. + + - When `Destroy()`ing a `Set` or `Map` you must include the correct + `Type` (since an `Add()` would fail if you did not specify it or + specified it incorrectly). + + - When `Destroy()`ing a `Map` `Element` you must include the correct + `Value` (since an `Add()` would fail if you did not specify it or + specified it incorrectly). + +## Objects + +The `Transaction` methods take arguments of type `knftables.Object`. +The currently-supported objects are: + +- `Table` +- `Flowtable` +- `Chain` +- `Rule` +- `Set` +- `Map` +- `Element` +- `Counter` + +Optional fields in objects can be filled in with the help of the +`PtrTo()` function, which just returns a pointer to its argument. + +`Concat()` can be used to concatenate a series of strings, `[]string` +arrays, and other arguments (including numbers, `net.IP`s / +`net.IPNet`s, and anything else that can be formatted usefully via +`fmt.Sprintf("%s")`) together into a single string. This is often +useful when constructing `Rule`s. + +## `knftables.Fake` + +There is a fake (in-memory) implementation of `knftables.Interface` +for use in unit tests. Use `knftables.NewFake()` instead of +`knftables.New()` to create it, and then it should work mostly the +same. 
See `fake.go` for more details of the public APIs for examining +the current state of the fake nftables database. + +## Missing APIs + +Various top-level object types are not yet supported. + +Most IPTables libraries have an API for "add this rule only if it +doesn't already exist", but that does not seem as useful in nftables +(or at least "in nftables as used by Kubernetes-ish components that +aren't just blindly copying over old iptables APIs"), because chains +tend to have static rules and dynamic sets/maps, rather than having +dynamic rules. If you aren't sure if a chain has the correct rules, +you can just `Flush` it and recreate all of the rules. + +`ListRules` returns `Rule` objects without the `Rule` field filled in, +because it uses the JSON API to list the rules, but there is no easy +way to convert the JSON rule representation back into plaintext form. +This means that it is only useful when either (a) you know the order +of the rules in the chain, but want to know their handles, or (b) you +can recognize the rules you are looking for by their comments, rather +than the rule bodies. + +## Possible future changes + +### `nft` output parsing + +`nft`'s output is documented and standardized, so it ought to be +possible for us to extract better error messages in the event of a +transaction failure. + +Additionally, if we used the `--echo` (`-e`) and `--handle` (`-a`) +flags, we could learn the handles associated with newly-created +objects in a transaction, and return these to the caller somehow. +(E.g., by setting the `Handle` field in the object that had been +passed to `tx.Add` when the transaction is run.) + +(For now, `ListRules` fills in the handles of the rules it returns, so +it's possible to find out a rule's handle after the fact that way. For +other supported object types, either handles don't exist (`Element`) +or you don't really need to know their handles because it's possible +to delete by name instead (`Table`, `Chain`, `Set`, `Map`).) 
+ +### List APIs + +The fact that `List` works completely differently from `ListRules` and +`ListElements` is a historical artifact. + +I would like to have a single function + +```golang +List[T Object](ctx context.Context, template T) ([]T, error) +``` + +So you could say + +```golang +elements, err := nft.List(ctx, &knftables.Element{Set: "myset"}) +``` + +to list the elements of "myset". But this doesn't actually compile +("`syntax error: method must have no type parameters`") because +allowing that would apparently introduce extremely complicated edge +cases in Go generics. + +### Set/map type representation + +There is currently an annoying asymmetry in the representation of +concatenated types between `Set`/`Map` and `Element`, where the former +uses a string containing `nft` syntax, and the latter uses an array: + +```golang +tx.Add(&knftables.Set{ + Name: "firewall", + Type: "ipv4_addr . inet_proto . inet_service", +}) +tx.Add(&knftables.Element{ + Set: "firewall", + Key: []string{"10.1.2.3", "tcp", "80"}, +}) +``` + +This will probably be fixed at some point, which may result in a +change to how the `type` vs `typeof` distinction is handled as well. + +### Optimization and rule representation + +We will need to optimize the performance of large transactions. One +change that is likely is to avoid pre-concatenating rule elements in +cases like: + +```golang +tx.Add(&knftables.Rule{ + Chain: "mychain", + Rule: knftables.Concat( + "ip daddr", destIP, + "ip protocol", "tcp", + "th port", destPort, + "jump", destChain, + ) +}) +``` + +This will presumably require a change to `knftables.Rule` and/or +`knftables.Concat()` but I'm not sure exactly what it will be. + +## Community, discussion, contribution, and support + +knftables is maintained by [Kubernetes SIG Network](https://github.com/kubernetes/community/tree/master/sig-network). 
+ +- [sig-network slack channel](https://kubernetes.slack.com/messages/sig-network) +- [kubernetes-sig-network mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-network) + +See [`CONTRIBUTING.md`](CONTRIBUTING.md) for more information about +contributing. Participation in the Kubernetes community is governed by +the [Kubernetes Code of Conduct](code-of-conduct.md). diff --git a/vendor/sigs.k8s.io/knftables/SECURITY_CONTACTS b/vendor/sigs.k8s.io/knftables/SECURITY_CONTACTS new file mode 100644 index 0000000000..eb4390a2e1 --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/SECURITY_CONTACTS @@ -0,0 +1,13 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Security Response Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +danwinship diff --git a/vendor/sigs.k8s.io/knftables/code-of-conduct.md b/vendor/sigs.k8s.io/knftables/code-of-conduct.md new file mode 100644 index 0000000000..0d15c00cf3 --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/knftables/error.go b/vendor/sigs.k8s.io/knftables/error.go new file mode 100644 index 0000000000..fe57da03b8 --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/error.go @@ -0,0 +1,94 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package knftables + +import ( + "errors" + "fmt" + "os/exec" + "strings" + "syscall" +) + +type nftablesError struct { + wrapped error + msg string + errno syscall.Errno +} + +// wrapError wraps an error resulting from running nft +func wrapError(err error) error { + nerr := &nftablesError{wrapped: err, msg: err.Error()} + ee := &exec.ExitError{} + if errors.As(err, &ee) { + if len(ee.Stderr) > 0 { + nerr.msg = string(ee.Stderr) + eol := strings.Index(nerr.msg, "\n") + // The nft binary does not call setlocale() and so will return + // English error strings regardless of the locale. + enoent := strings.Index(nerr.msg, "No such file or directory") + eexist := strings.Index(nerr.msg, "File exists") + if enoent != -1 && (enoent < eol || eol == -1) { + nerr.errno = syscall.ENOENT + } else if eexist != -1 && (eexist < eol || eol == -1) { + nerr.errno = syscall.EEXIST + } + } + } + return nerr +} + +// notFoundError returns an nftablesError with the given message for which IsNotFound will +// return true. +func notFoundError(format string, args ...interface{}) error { + return &nftablesError{msg: fmt.Sprintf(format, args...), errno: syscall.ENOENT} +} + +// existsError returns an nftablesError with the given message for which IsAlreadyExists +// will return true. 
+func existsError(format string, args ...interface{}) error { + return &nftablesError{msg: fmt.Sprintf(format, args...), errno: syscall.EEXIST} +} + +func (nerr *nftablesError) Error() string { + return nerr.msg +} + +func (nerr *nftablesError) Unwrap() error { + return nerr.wrapped +} + +// IsNotFound tests if err corresponds to an nftables "not found" error of any sort. +// (e.g., in response to a "delete rule" command, this might indicate that the rule +// doesn't exist, or the chain doesn't exist, or the table doesn't exist.) +func IsNotFound(err error) bool { + var nerr *nftablesError + if errors.As(err, &nerr) { + return nerr.errno == syscall.ENOENT + } + return false +} + +// IsAlreadyExists tests if err corresponds to an nftables "already exists" error (e.g. +// when doing a "create" rather than an "add"). +func IsAlreadyExists(err error) bool { + var nerr *nftablesError + if errors.As(err, &nerr) { + return nerr.errno == syscall.EEXIST + } + return false +} diff --git a/vendor/sigs.k8s.io/knftables/exec.go b/vendor/sigs.k8s.io/knftables/exec.go new file mode 100644 index 0000000000..154b5bc45c --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/exec.go @@ -0,0 +1,48 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package knftables + +import ( + "os/exec" +) + +// execer is a mockable wrapper around os/exec. 
+type execer interface { + // LookPath wraps exec.LookPath + LookPath(file string) (string, error) + + // Run runs cmd as with cmd.Output(). If an error occurs, and the process outputs + // stderr, then that output will be returned in the error. + Run(cmd *exec.Cmd) (string, error) +} + +// realExec implements execer by actually using os/exec +type realExec struct{} + +// LookPath is part of execer +func (realExec) LookPath(file string) (string, error) { + return exec.LookPath(file) +} + +// Run is part of execer +func (realExec) Run(cmd *exec.Cmd) (string, error) { + out, err := cmd.Output() + if err != nil { + err = wrapError(err) + } + return string(out), err +} diff --git a/vendor/sigs.k8s.io/knftables/fake.go b/vendor/sigs.k8s.io/knftables/fake.go new file mode 100644 index 0000000000..0942fa276f --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/fake.go @@ -0,0 +1,933 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package knftables + +import ( + "context" + "fmt" + "reflect" + "regexp" + "sort" + "strings" + "sync" +) + +// Fake is a fake implementation of Interface +type Fake struct { + nftContext + // mutex is used to protect Table/Tables and LastTransaction. + // When Table/Tables and LastTransaction are accessed directly, the caller must + // acquire Fake.RLock and release when finished. + sync.RWMutex + + nextHandle int + + // Table contains the Interface's table (assuming the Fake has a default table). 
+ // This will be `nil` until you `tx.Add()` the table. + // Make sure to acquire Fake.RLock before accessing Table in a concurrent environment. + Table *FakeTable + + // Tables contains all tables known to Fake. This will be empty until you + // `tx.Add()` a table. + // Make sure to acquire Fake.RLock before accessing Tables in a concurrent environment. + Tables map[Family]map[string]*FakeTable + + // LastTransaction is the last transaction passed to Run(). It will remain set until the + // next time Run() is called. (It is not affected by Check().) + // Make sure to acquire Fake.RLock before accessing LastTransaction in a + // concurrent environment. + LastTransaction *Transaction +} + +// FakeTable wraps Table for the Fake implementation +type FakeTable struct { + Table + + // Flowtables contains the table's flowtables, keyed by name + Flowtables map[string]*FakeFlowtable + + // Chains contains the table's chains, keyed by name + Chains map[string]*FakeChain + + // Sets contains the table's sets, keyed by name + Sets map[string]*FakeSet + + // Maps contains the table's maps, keyed by name + Maps map[string]*FakeMap + + // Counters contains the table's counters, keyed by name + Counters map[string]*FakeCounter +} + +// FakeFlowtable wraps Flowtable for the Fake implementation +type FakeFlowtable struct { + Flowtable +} + +// FakeCounter wraps Counter for the Fake implementation +type FakeCounter struct { + Counter +} + +// FakeChain wraps Chain for the Fake implementation +type FakeChain struct { + Chain + + // Rules contains the chain's rules, in order + Rules []*Rule +} + +// FakeSet wraps Set for the Fake implementation +type FakeSet struct { + Set + + // Elements contains the set's elements. You can also use the FakeSet's + // FindElement() method to see if a particular element is present. + Elements []*Element +} + +// FakeMap wraps Set for the Fake implementation +type FakeMap struct { + Map + + // Elements contains the map's elements. 
You can also use the FakeMap's + // FindElement() method to see if a particular element is present. + Elements []*Element +} + +// NewFake creates a new fake Interface, for unit tests +func NewFake(family Family, table string) *Fake { + if (family == "") != (table == "") { + // NewFake doesn't have an error return value, so... + panic("family and table must either both be specified or both be empty") + } + + return &Fake{ + nftContext: nftContext{ + family: family, + table: table, + }, + } +} + +var _ Interface = &Fake{} + +// ListAll is part of Interface. +func (fake *Fake) ListAll(_ context.Context) (map[string][]string, error) { + fake.RLock() + defer fake.RUnlock() + if fake.Table == nil { + return nil, notFoundError("no such table %q", fake.table) + } + + result := make(map[string][]string) + + for name := range fake.Table.Flowtables { + result["flowtable"] = append(result["flowtable"], name) + } + for name := range fake.Table.Chains { + result["chain"] = append(result["chain"], name) + } + for name := range fake.Table.Sets { + result["set"] = append(result["set"], name) + } + for name := range fake.Table.Maps { + result["map"] = append(result["map"], name) + } + for name := range fake.Table.Counters { + result["counter"] = append(result["counter"], name) + } + + return result, nil +} + +// List is part of Interface. 
+func (fake *Fake) List(_ context.Context, objectType string) ([]string, error) { + fake.RLock() + defer fake.RUnlock() + if fake.Table == nil { + return nil, notFoundError("no such table %q", fake.table) + } + + var result []string + + switch objectType { + case "flowtable", "flowtables": + for name := range fake.Table.Flowtables { + result = append(result, name) + } + case "chain", "chains": + for name := range fake.Table.Chains { + result = append(result, name) + } + case "set", "sets": + for name := range fake.Table.Sets { + result = append(result, name) + } + case "map", "maps": + for name := range fake.Table.Maps { + result = append(result, name) + } + case "counter", "counters": + for name := range fake.Table.Counters { + result = append(result, name) + } + + default: + return nil, fmt.Errorf("unsupported object type %q", objectType) + } + + return result, nil +} + +// ListRules is part of Interface +func (fake *Fake) ListRules(_ context.Context, chain string) ([]*Rule, error) { + fake.RLock() + defer fake.RUnlock() + if fake.Table == nil { + return nil, notFoundError("no such table %q", fake.table) + } + + rules := []*Rule{} + if chain == "" { + // Include all rules across all chains. + for _, ch := range fake.Table.Chains { + rules = append(rules, ch.Rules...) + } + } else { + ch := fake.Table.Chains[chain] + if ch == nil { + return nil, notFoundError("no such chain %q", chain) + } + rules = append(rules, ch.Rules...) 
+ } + return rules, nil +} + +// ListElements is part of Interface +func (fake *Fake) ListElements(_ context.Context, objectType, name string) ([]*Element, error) { + fake.RLock() + defer fake.RUnlock() + if fake.Table == nil { + return nil, notFoundError("no such %s %q", objectType, name) + } + if objectType == "set" { + s := fake.Table.Sets[name] + if s != nil { + return s.Elements, nil + } + } else if objectType == "map" { + m := fake.Table.Maps[name] + if m != nil { + return m.Elements, nil + } + } + return nil, notFoundError("no such %s %q", objectType, name) +} + +// NewTransaction is part of Interface +func (fake *Fake) NewTransaction() *Transaction { + return &Transaction{nftContext: &fake.nftContext} +} + +// Run is part of Interface +func (fake *Fake) Run(_ context.Context, tx *Transaction) error { + fake.Lock() + defer fake.Unlock() + fake.LastTransaction = tx + updatedTables, err := fake.run(tx) + if err == nil { + fake.Tables = updatedTables + if fake.family != "" && fake.table != "" { + fake.Table = updatedTables[fake.family][fake.table] + } + } + return err +} + +// Check is part of Interface +func (fake *Fake) Check(_ context.Context, tx *Transaction) error { + fake.RLock() + defer fake.RUnlock() + _, err := fake.run(tx) + return err +} + +// must be called with fake.lock held +func (fake *Fake) run(tx *Transaction) (map[Family]map[string]*FakeTable, error) { + if tx.err != nil { + return nil, tx.err + } + + updatedTables := make(map[Family]map[string]*FakeTable) + for family := range fake.Tables { + updatedTables[family] = make(map[string]*FakeTable) + for name, table := range fake.Tables[family] { + updatedTables[family][name] = table.copy() + } + } + + for _, op := range tx.operations { + if op.verb == addVerb || op.verb == createVerb || op.verb == insertVerb { + fake.nextHandle++ + } + + switch obj := op.obj.(type) { + case *Table: + family, tableName, _ := getTable(&fake.nftContext, obj.Family, obj.Name) + table := 
updatedTables[family][tableName] + err := checkExists(op.verb, "table", fake.table, table != nil) + if err != nil { + return nil, err + } + switch op.verb { + case flushVerb: + table = nil + fallthrough + case addVerb, createVerb: + if table != nil { + continue + } + table = &FakeTable{ + Table: *obj, + Flowtables: make(map[string]*FakeFlowtable), + Chains: make(map[string]*FakeChain), + Sets: make(map[string]*FakeSet), + Maps: make(map[string]*FakeMap), + Counters: make(map[string]*FakeCounter), + } + table.Handle = PtrTo(fake.nextHandle) + if updatedTables[family] == nil { + updatedTables[family] = make(map[string]*FakeTable) + } + updatedTables[family][tableName] = table + case deleteVerb, destroyVerb: + if table != nil { + delete(updatedTables[family], tableName) + } + default: + return nil, fmt.Errorf("unhandled operation %q", op.verb) + } + + case *Flowtable: + family, tableName, _ := getTable(&fake.nftContext, obj.Family, obj.Table) + table, err := fake.checkTable(updatedTables, family, tableName) + if err != nil { + return nil, err + } + existingFlowtable := table.Flowtables[obj.Name] + err = checkExists(op.verb, "flowtable", obj.Name, existingFlowtable != nil) + if err != nil { + return nil, err + } + switch op.verb { + case addVerb, createVerb: + if existingFlowtable != nil { + continue + } + flowtable := *obj + flowtable.Handle = PtrTo(fake.nextHandle) + table.Flowtables[obj.Name] = &FakeFlowtable{ + Flowtable: flowtable, + } + case deleteVerb, destroyVerb: + // FIXME delete-by-handle + delete(table.Flowtables, obj.Name) + default: + return nil, fmt.Errorf("unhandled operation %q", op.verb) + } + + case *Chain: + family, tableName, _ := getTable(&fake.nftContext, obj.Family, obj.Table) + table, err := fake.checkTable(updatedTables, family, tableName) + if err != nil { + return nil, err + } + existingChain := table.Chains[obj.Name] + err = checkExists(op.verb, "chain", obj.Name, existingChain != nil) + if err != nil { + return nil, err + } + switch 
op.verb { + case addVerb, createVerb: + if existingChain != nil { + continue + } + chain := *obj + chain.Handle = PtrTo(fake.nextHandle) + table.Chains[obj.Name] = &FakeChain{ + Chain: chain, + } + case flushVerb: + existingChain.Rules = nil + case deleteVerb, destroyVerb: + // FIXME delete-by-handle + delete(table.Chains, obj.Name) + default: + return nil, fmt.Errorf("unhandled operation %q", op.verb) + } + + case *Rule: + family, tableName, _ := getTable(&fake.nftContext, obj.Family, obj.Table) + table, err := fake.checkTable(updatedTables, family, tableName) + if err != nil { + return nil, err + } + existingChain := table.Chains[obj.Chain] + if existingChain == nil { + return nil, notFoundError("no such chain %q", obj.Chain) + } + if op.verb == deleteVerb { + i := findRule(existingChain.Rules, *obj.Handle) + if i == -1 { + return nil, notFoundError("no rule with handle %d", *obj.Handle) + } + existingChain.Rules = append(existingChain.Rules[:i], existingChain.Rules[i+1:]...) + continue + } + + rule := *obj + refRule := -1 + if rule.Handle != nil { + refRule = findRule(existingChain.Rules, *obj.Handle) + if refRule == -1 { + return nil, notFoundError("no rule with handle %d", *obj.Handle) + } + } else if obj.Index != nil { + if *obj.Index >= len(existingChain.Rules) { + return nil, notFoundError("no rule with index %d", *obj.Index) + } + refRule = *obj.Index + } + + if err := checkRuleRefs(obj, table); err != nil { + return nil, err + } + + switch op.verb { + case addVerb: + if refRule == -1 { + existingChain.Rules = append(existingChain.Rules, &rule) + } else { + existingChain.Rules = append(existingChain.Rules[:refRule+1], append([]*Rule{&rule}, existingChain.Rules[refRule+1:]...)...) + } + rule.Handle = PtrTo(fake.nextHandle) + case insertVerb: + if refRule == -1 { + existingChain.Rules = append([]*Rule{&rule}, existingChain.Rules...) 
+ } else { + existingChain.Rules = append(existingChain.Rules[:refRule], append([]*Rule{&rule}, existingChain.Rules[refRule:]...)...) + } + rule.Handle = PtrTo(fake.nextHandle) + case replaceVerb: + existingChain.Rules[refRule] = &rule + default: + return nil, fmt.Errorf("unhandled operation %q", op.verb) + } + + case *Set: + family, tableName, _ := getTable(&fake.nftContext, obj.Family, obj.Table) + table, err := fake.checkTable(updatedTables, family, tableName) + if err != nil { + return nil, err + } + existingSet := table.Sets[obj.Name] + err = checkExists(op.verb, "set", obj.Name, existingSet != nil) + if err != nil { + return nil, err + } + switch op.verb { + case addVerb, createVerb: + if existingSet != nil { + continue + } + set := *obj + set.Handle = PtrTo(fake.nextHandle) + table.Sets[obj.Name] = &FakeSet{ + Set: set, + } + case flushVerb: + existingSet.Elements = nil + case deleteVerb, destroyVerb: + // FIXME delete-by-handle + delete(table.Sets, obj.Name) + default: + return nil, fmt.Errorf("unhandled operation %q", op.verb) + } + case *Map: + family, tableName, _ := getTable(&fake.nftContext, obj.Family, obj.Table) + table, err := fake.checkTable(updatedTables, family, tableName) + if err != nil { + return nil, err + } + existingMap := table.Maps[obj.Name] + err = checkExists(op.verb, "map", obj.Name, existingMap != nil) + if err != nil { + return nil, err + } + switch op.verb { + case addVerb: + if existingMap != nil { + continue + } + mapObj := *obj + mapObj.Handle = PtrTo(fake.nextHandle) + table.Maps[obj.Name] = &FakeMap{ + Map: mapObj, + } + case flushVerb: + existingMap.Elements = nil + case deleteVerb, destroyVerb: + // FIXME delete-by-handle + delete(table.Maps, obj.Name) + default: + return nil, fmt.Errorf("unhandled operation %q", op.verb) + } + case *Element: + family, tableName, _ := getTable(&fake.nftContext, obj.Family, obj.Table) + table, err := fake.checkTable(updatedTables, family, tableName) + if err != nil { + return nil, err + } + if 
obj.Set != "" { + existingSet := table.Sets[obj.Set] + if existingSet == nil { + return nil, notFoundError("no such set %q", obj.Set) + } + switch op.verb { + case addVerb, createVerb: + element := *obj + if i := findElement(existingSet.Elements, element.Key); i != -1 { + if op.verb == createVerb { + return nil, existsError("element %q already exists", strings.Join(element.Key, " . ")) + } + existingSet.Elements[i] = &element + } else { + existingSet.Elements = append(existingSet.Elements, &element) + } + case deleteVerb, destroyVerb: + element := *obj + if i := findElement(existingSet.Elements, element.Key); i != -1 { + existingSet.Elements = append(existingSet.Elements[:i], existingSet.Elements[i+1:]...) + } else if op.verb == deleteVerb { + return nil, notFoundError("no such element %q", strings.Join(element.Key, " . ")) + } + default: + return nil, fmt.Errorf("unhandled operation %q", op.verb) + } + } else { + existingMap := table.Maps[obj.Map] + if existingMap == nil { + return nil, notFoundError("no such map %q", obj.Map) + } + if err := checkElementRefs(obj, table); err != nil { + return nil, err + } + switch op.verb { + case addVerb, createVerb: + element := *obj + if i := findElement(existingMap.Elements, element.Key); i != -1 { + if op.verb == createVerb { + return nil, existsError("element %q already exists", strings.Join(element.Key, ". ")) + } + existingMap.Elements[i] = &element + } else { + existingMap.Elements = append(existingMap.Elements, &element) + } + case deleteVerb, destroyVerb: + element := *obj + if i := findElement(existingMap.Elements, element.Key); i != -1 { + existingMap.Elements = append(existingMap.Elements[:i], existingMap.Elements[i+1:]...) + } else if op.verb == deleteVerb { + return nil, notFoundError("no such element %q", strings.Join(element.Key, " . 
")) + } + default: + return nil, fmt.Errorf("unhandled operation %q", op.verb) + } + } + case *Counter: + family, tableName, _ := getTable(&fake.nftContext, obj.Family, obj.Table) + table, err := fake.checkTable(updatedTables, family, tableName) + if err != nil { + return nil, err + } + existingCounter := table.Counters[obj.Name] + switch op.verb { + case addVerb, createVerb: + err := checkExists(op.verb, "counter", obj.Name, existingCounter != nil) + if err != nil { + return nil, err + } + if existingCounter != nil { + continue + } + obj.Handle = PtrTo(fake.nextHandle) + table.Counters[obj.Name] = &FakeCounter{*obj} + case resetVerb: + err := checkExists(op.verb, "counter", obj.Name, existingCounter != nil) + if err != nil { + return nil, err + } + table.Counters[obj.Name].Packets = PtrTo[uint64](0) + table.Counters[obj.Name].Bytes = PtrTo[uint64](0) + case deleteVerb: + if obj.Handle != nil { + var found bool + for _, counter := range table.Counters { + if *counter.Handle == *obj.Handle { + found = true + delete(table.Counters, counter.Name) + break + } + } + if !found { + return nil, notFoundError("no such counter %q", obj.Name) + } + } else { + err := checkExists(op.verb, "counter", obj.Name, existingCounter != nil) + if err != nil { + return nil, err + } + delete(table.Counters, obj.Name) + } + default: + return nil, fmt.Errorf("unhandled operation %q", op.verb) + } + default: + return nil, fmt.Errorf("unhandled object type %T", op.obj) + } + } + + return updatedTables, nil +} + +func (fake *Fake) checkTable(updatedTables map[Family]map[string]*FakeTable, family Family, tableName string) (*FakeTable, error) { + table := updatedTables[family][tableName] + if table == nil { + return nil, notFoundError("no such table \"%s\" \"%s\"", family, tableName) + } + return table, nil +} + +func checkExists(verb verb, objectType, name string, exists bool) error { + switch verb { + case addVerb, destroyVerb: + // It's fine if the object either exists or doesn't + return nil 
+ case createVerb: + if exists { + return existsError("%s %q already exists", objectType, name) + } + default: + if !exists { + return notFoundError("no such %s %q", objectType, name) + } + } + return nil +} + +// checkRuleRefs checks for chains, sets, and maps referenced by rule in table +func checkRuleRefs(rule *Rule, table *FakeTable) error { + words := strings.Split(rule.Rule, " ") + for i, word := range words { + if strings.HasPrefix(word, "@") && !strings.Contains(word, ",") { + name := word[1:] + if i > 0 && (words[i-1] == "map" || words[i-1] == "vmap") { + if table.Maps[name] == nil { + return notFoundError("no such map %q", name) + } + } else if i > 0 && (words[i-1] == "offload" || words[i-1] == "add") { + if table.Flowtables[name] == nil { + return notFoundError("no such flowtable %q", name) + } + } else { + // recent nft lets you use a map in a set lookup + if table.Sets[name] == nil && table.Maps[name] == nil { + return notFoundError("no such set %q", name) + } + } + } else if (word == "goto" || word == "jump") && i < len(words)-1 { + name := words[i+1] + if table.Chains[name] == nil { + return notFoundError("no such chain %q", name) + } + } + } + return nil +} + +// checkElementRefs checks for chains referenced by an element +func checkElementRefs(element *Element, table *FakeTable) error { + if len(element.Value) != 1 { + return nil + } + words := strings.Split(element.Value[0], " ") + if len(words) == 2 && (words[0] == "goto" || words[0] == "jump") { + name := words[1] + if table.Chains[name] == nil { + return notFoundError("no such chain %q", name) + } + } + return nil +} + +// Dump dumps the current contents of fake, in a way that looks like an nft transaction. 
+func (fake *Fake) Dump() string { + fake.RLock() + defer fake.RUnlock() + + buf := &strings.Builder{} + for _, family := range sortKeys(fake.Tables) { + for _, tableName := range sortKeys(fake.Tables[family]) { + fake.dumpTable(buf, fake.Tables[family][tableName]) + } + } + return buf.String() +} + +func (fake *Fake) dumpTable(buf *strings.Builder, table *FakeTable) { + flowtables := sortKeys(table.Flowtables) + chains := sortKeys(table.Chains) + sets := sortKeys(table.Sets) + maps := sortKeys(table.Maps) + counters := sortKeys(table.Counters) + + // Write out all of the object adds first. + + table.writeOperation(addVerb, &fake.nftContext, buf) + for _, fname := range flowtables { + ft := table.Flowtables[fname] + ft.writeOperation(addVerb, &fake.nftContext, buf) + } + for _, cname := range chains { + ch := table.Chains[cname] + ch.writeOperation(addVerb, &fake.nftContext, buf) + } + for _, sname := range sets { + s := table.Sets[sname] + s.writeOperation(addVerb, &fake.nftContext, buf) + } + for _, mname := range maps { + m := table.Maps[mname] + m.writeOperation(addVerb, &fake.nftContext, buf) + } + for _, cname := range counters { + m := table.Counters[cname] + m.writeOperation(addVerb, &fake.nftContext, buf) + } + // Now write their contents. + + for _, cname := range chains { + ch := table.Chains[cname] + for _, rule := range ch.Rules { + // Avoid outputing handles + dumpRule := *rule + dumpRule.Handle = nil + dumpRule.Index = nil + dumpRule.writeOperation(addVerb, &fake.nftContext, buf) + } + } + for _, sname := range sets { + s := table.Sets[sname] + for _, element := range s.Elements { + element.writeOperation(addVerb, &fake.nftContext, buf) + } + } + for _, mname := range maps { + m := table.Maps[mname] + for _, element := range m.Elements { + element.writeOperation(addVerb, &fake.nftContext, buf) + } + } +} + +var commonRegexp = regexp.MustCompile(`add ([^ ]*) ([^ ]*) ([^ ]*)( (.*))?`) + +// ParseDump can parse a dump for a given nft instance. 
+// It expects fake's table name and family in all rules.
+// The best way to verify that everything important was properly parsed is to
+// compare given data with nft.Dump() output.
+func (fake *Fake) ParseDump(data string) (err error) {
+	lines := strings.Split(data, "\n")
+	var i int
+	var line string
+	parsingDone := false
+	defer func() {
+		if err != nil && !parsingDone {
+			err = fmt.Errorf("%w (at line %v: %s)", err, i+1, line)
+		}
+	}()
+	tx := fake.NewTransaction()
+
+	for i, line = range lines {
+		line = strings.TrimSpace(line)
+		if line == "" || line[0] == '#' {
+			continue
+		}
+		match := commonRegexp.FindStringSubmatch(line)
+		if match == nil {
+			return fmt.Errorf("could not parse")
+		}
+		family := Family(match[2])
+		table := match[3]
+
+		// If fake has a family and table specified then the parsed family and
+		// table must match (but then we clear them, because we don't want them
+		// to be added to the returned objects, for backward compatibility).
+		if fake.family != "" {
+			if family != fake.family {
+				return fmt.Errorf("wrong family %q in rule", family)
+			}
+			family = ""
+		}
+		if fake.table != "" {
+			if table != fake.table {
+				return fmt.Errorf("wrong table name %q in rule", table)
+			}
+			table = ""
+		}
+
+		var obj Object
+		switch match[1] {
+		case "table":
+			obj = &Table{}
+		case "flowtable":
+			obj = &Flowtable{}
+		case "chain":
+			obj = &Chain{}
+		case "rule":
+			obj = &Rule{}
+		case "map":
+			obj = &Map{}
+		case "set":
+			obj = &Set{}
+		case "element":
+			obj = &Element{}
+		case "counter":
+			obj = &Counter{}
+		default:
+			return fmt.Errorf("unknown object %s", match[1])
+		}
+		err = obj.parse(family, table, match[5])
+		if err != nil {
+			return err
+		}
+		tx.Add(obj)
+	}
+	parsingDone = true
+	return fake.Run(context.Background(), tx)
+}
+
+func sortKeys[K ~string, V any](m map[K]V) []K {
+	keys := make([]K, 0, len(m))
+	for key := range m {
+		keys = append(keys, key)
+	}
+	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+	return keys
+}
+
+func findRule(rules []*Rule, handle int) int {
+	for i := range rules {
+		if rules[i].Handle != nil && *rules[i].Handle == handle {
+			return i
+		}
+	}
+	return -1
+}
+
+func findElement(elements []*Element, key []string) int {
+	for i := range elements {
+		if reflect.DeepEqual(elements[i].Key, key) {
+			return i
+		}
+	}
+	return -1
+}
+
+// copy creates a copy of table with new arrays/maps so we can perform a transaction
+// on it without changing the original table.
+func (table *FakeTable) copy() *FakeTable {
+	if table == nil {
+		return nil
+	}
+
+	tcopy := &FakeTable{
+		Table:      table.Table,
+		Flowtables: make(map[string]*FakeFlowtable),
+		Chains:     make(map[string]*FakeChain),
+		Sets:       make(map[string]*FakeSet),
+		Maps:       make(map[string]*FakeMap),
+		Counters:   make(map[string]*FakeCounter),
+	}
+	for name, flowtable := range table.Flowtables {
+		tcopy.Flowtables[name] = &FakeFlowtable{
+			Flowtable: flowtable.Flowtable,
+		}
+	}
+	for name, chain := range table.Chains {
+		tcopy.Chains[name] = &FakeChain{
+			Chain: chain.Chain,
+			Rules: append([]*Rule{}, chain.Rules...),
+		}
+	}
+	for name, set := range table.Sets {
+		tcopy.Sets[name] = &FakeSet{
+			Set:      set.Set,
+			Elements: append([]*Element{}, set.Elements...),
+		}
+	}
+	for name, mapObj := range table.Maps {
+		tcopy.Maps[name] = &FakeMap{
+			Map:      mapObj.Map,
+			Elements: append([]*Element{}, mapObj.Elements...),
+		}
+	}
+	for name, counter := range table.Counters {
+		tcopy.Counters[name] = &FakeCounter{Counter: counter.Counter}
+	}
+	return tcopy
+}
+
+// FindElement finds an element of the set with the given key. If there is no matching
+// element, it returns nil.
+func (s *FakeSet) FindElement(key ...string) *Element {
+	index := findElement(s.Elements, key)
+	if index == -1 {
+		return nil
+	}
+	return s.Elements[index]
+}
+
+// FindElement finds an element of the map with the given key. If there is no matching
+// element, it returns nil.
+func (m *FakeMap) FindElement(key ...string) *Element {
+	index := findElement(m.Elements, key)
+	if index == -1 {
+		return nil
+	}
+	return m.Elements[index]
+}
+
+// ListCounters is part of Interface
+func (fake *Fake) ListCounters(_ context.Context) ([]*Counter, error) {
+	counters := make([]*Counter, 0, len(fake.Table.Counters))
+	for _, fakeCounter := range fake.Table.Counters {
+		counters = append(counters, PtrTo(fakeCounter.Counter))
+	}
+	return counters, nil
+}
diff --git a/vendor/sigs.k8s.io/knftables/nftables.go b/vendor/sigs.k8s.io/knftables/nftables.go
new file mode 100644
index 0000000000..df920f11f0
--- /dev/null
+++ b/vendor/sigs.k8s.io/knftables/nftables.go
@@ -0,0 +1,698 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package knftables
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"os/exec"
+	"strings"
+	"sync"
+)
+
+// Interface is an interface for running nftables commands against a given family and table.
+type Interface interface {
+	// NewTransaction returns a new (empty) Transaction
+	NewTransaction() *Transaction
+
+	// Run runs a Transaction and returns the result. The IsNotFound and
+	// IsAlreadyExists methods can be used to test the result.
+	Run(ctx context.Context, tx *Transaction) error
+
+	// Check does a dry-run of a Transaction (as with `nft --check`) and returns the
+	// result. The IsNotFound and IsAlreadyExists methods can be used to test the
+	// result.
+ Check(ctx context.Context, tx *Transaction) error + + // ListAll returns a map containing the names of all objects in the table, + // grouped by object type. If there are no objects, this will return an empty list + // and no error. + ListAll(ctx context.Context) (map[string][]string, error) + + // List returns a list of the names of the objects of objectType ("chain", "set", + // "map" or "counter") in the table. If there are no such objects, this will + // return an empty list and no error. + List(ctx context.Context, objectType string) ([]string, error) + + // ListRules returns a list of the rules in a chain, in order. If no chain name is + // specified, then all rules within the table will be returned. Note that at the + // present time, the Rule objects will have their `Comment` and `Handle` fields + // filled in, but *not* the actual `Rule` field. So this can only be used to find + // the handles of rules if they have unique comments to recognize them by, or if + // you know the order of the rules within the chain. If the chain exists but + // contains no rules, this will return an empty list and no error. + ListRules(ctx context.Context, chain string) ([]*Rule, error) + + // ListElements returns a list of the elements in a set or map. (objectType should + // be "set" or "map".) If the set/map exists but contains no elements, this will + // return an empty list and no error. + ListElements(ctx context.Context, objectType, name string) ([]*Element, error) + + // ListCounters returns a list of the counters in the table. + ListCounters(ctx context.Context) ([]*Counter, error) +} + +// Option is an optional nftables feature that an Interface might or might not support +type Option string + +const ( + // NoObjectCommentEmulation turns off the default knftables.Interface behavior of + // ignoring comments on Table, Chain, Set, and Map objects if the underlying CLI + // or kernel does not support them. 
(The only real reason to specify this is if + // you want to avoid doing any "nft check" calls at construction time.) + NoObjectCommentEmulation Option = "NoObjectCommentEmulation" + + // RequireDestroy tells knftables.New to fail if the `nft destroy` command is not + // available. + RequireDestroy Option = "RequireDestroy" + + // EmulateDestroy tells the Interface to emulate the `nft destroy` command if it + // is not available. If you pass this option, then that will restrict the ways + // that you can use the `tx.Destroy()` method to be compatible with destroy + // emulation; see the docs for that method for more details. + EmulateDestroy Option = "EmulateDestroy" +) + +type nftContext struct { + family Family + table string + + // noObjectComments is true if comments on Table/Chain/Set/Map are not supported. + // (Comments on Rule and Element are always supported.) + noObjectComments bool + + // emulateDestroy is true if tx.Destroy() should restrict itself to destroy + // actions that are compatible with an emulated version of "nft destroy" + emulateDestroy bool + + // hasDestroy is true emulateDestroy is true but the nft binary actually supports + // "destroy" so we don't need to bother emulating it. + hasDestroy bool +} + +// realNFTables is an implementation of Interface +type realNFTables struct { + nftContext + + bufferMutex sync.Mutex + buffer *bytes.Buffer + + exec execer + path string +} + +func optionSet(options []Option, option Option) bool { + for _, o := range options { + if o == option { + return true + } + } + return false +} + +// newInternal creates a new nftables.Interface for interacting with the given table; this +// is split out from New() so it can be used from unit tests with a fakeExec. 
+func newInternal(family Family, table string, execer execer, options ...Option) (Interface, error) { + var err error + + if (family == "") != (table == "") { + return nil, fmt.Errorf("family and table must either both be specified or both be empty") + } + + nft := &realNFTables{ + nftContext: nftContext{ + family: family, + table: table, + }, + buffer: &bytes.Buffer{}, + exec: execer, + } + + nft.path, err = nft.exec.LookPath("nft") + if err != nil { + return nil, fmt.Errorf("could not find nftables binary: %w", err) + } + + cmd := exec.Command(nft.path, "--version") + out, err := nft.exec.Run(cmd) + if err != nil { + return nil, fmt.Errorf("could not run nftables command: %w", err) + } + if strings.HasPrefix(out, "nftables v0.") || strings.HasPrefix(out, "nftables v1.0.0 ") { + return nil, fmt.Errorf("nft version must be v1.0.1 or later (got %s)", strings.TrimSpace(out)) + } + + testFamily := family + if testFamily == "" { + testFamily = InetFamily + } + testTable := table + if testTable == "" { + testTable = "test" + } + + // Check that (a) nft works, (b) we have permission, (c) the kernel is new enough + // to support object comments. + tx := nft.NewTransaction() + tx.Add(&Table{ + Family: testFamily, + Name: testTable, + Comment: PtrTo("test"), + }) + if err := nft.Check(context.TODO(), tx); err != nil { + nft.noObjectComments = true + if !optionSet(options, NoObjectCommentEmulation) { + // Try again, checking just that (a) nft works, (b) we have permission. + tx := nft.NewTransaction() + tx.Add(&Table{ + Family: testFamily, + Name: testTable, + }) + err = nft.Check(context.TODO(), tx) + } + if err != nil { + return nil, fmt.Errorf("could not run nftables command: %w", err) + } + } + + requireDestroy := optionSet(options, RequireDestroy) + emulateDestroy := optionSet(options, EmulateDestroy) + if requireDestroy || emulateDestroy { + // Check if "nft destroy" is available. 
+ tx = nft.NewTransaction() + tx.Destroy(&Table{}) + if err := nft.Check(context.TODO(), tx); err != nil { + if requireDestroy { + return nil, fmt.Errorf("`nft destroy` is not available: %w", err) + } + } else { + nft.hasDestroy = true + } + // Can't set this until after doing the test above + nft.emulateDestroy = emulateDestroy + } + + return nft, nil +} + +// New creates a new nftables.Interface. If nftables is not available/usable on the +// current host, it will return an error. +// +// Normally, family and table will specify the family and table to use for all operations +// on the returned Interface. However, if you leave them empty (`""`), then the Interface +// will have no associated family/table and (a) you must explicitly fill in those fields +// in any objects you use in a Transaction, (b) you can't use any of the List* methods. +// +// In addition to the family and table, you can specify additional comma-separated options +// to New(). The currently-supported options are: +// +// - NoObjectCommentEmulation: disables the default knftables.Interface behavior of +// ignoring comments on Table, Chain, Set, and Map objects if the underlying CLI or +// kernel does not support them. +// +// - RequireDestroy: require the system to support `nft destroy`; the New() call will +// fail with an error on older systems. +// +// - EmulateDestroy: adjust the API of `tx.Destroy()` to make it possible to emulate via +// `nft add` and `nft delete` on systems that do not have `nft destroy`; see the docs +// for `tx.Destroy()` for more details. +func New(family Family, table string, options ...Option) (Interface, error) { + return newInternal(family, table, realExec{}, options...) 
+} + +// NewTransaction is part of Interface +func (nft *realNFTables) NewTransaction() *Transaction { + return &Transaction{nftContext: &nft.nftContext} +} + +// Run is part of Interface +func (nft *realNFTables) Run(ctx context.Context, tx *Transaction) error { + nft.bufferMutex.Lock() + defer nft.bufferMutex.Unlock() + + if tx.err != nil { + return tx.err + } + + nft.buffer.Reset() + tx.populateCommandBuf(nft.buffer) + + cmd := exec.CommandContext(ctx, nft.path, "-f", "-") + cmd.Stdin = nft.buffer + _, err := nft.exec.Run(cmd) + return err +} + +// Check is part of Interface +func (nft *realNFTables) Check(ctx context.Context, tx *Transaction) error { + nft.bufferMutex.Lock() + defer nft.bufferMutex.Unlock() + + if tx.err != nil { + return tx.err + } + + nft.buffer.Reset() + tx.populateCommandBuf(nft.buffer) + + cmd := exec.CommandContext(ctx, nft.path, "--check", "-f", "-") + cmd.Stdin = nft.buffer + _, err := nft.exec.Run(cmd) + return err +} + +// jsonVal looks up key in json; if it exists and is of type T, it returns (json[key], true). +// Otherwise it returns (_, false). +func jsonVal[T any](json map[string]interface{}, key string) (T, bool) { + if ifVal, exists := json[key]; exists { + tVal, ok := ifVal.(T) + return tVal, ok + } + var zero T + return zero, false +} + +// parseJSONResult takes the output of "nft -j list", validates it, and returns the array +// of objects (including the "metainfo" object) +func parseJSONObjects(listOutput string) ([]map[string]map[string]interface{}, error) { + // listOutput should contain JSON looking like: + // + // { + // "nftables": [ + // { + // "metainfo": { + // "json_schema_version": 1, + // ... + // } + // }, + // { + // "chain": { + // "family": "ip", + // "table": "kube-proxy", + // "name": "KUBE-SERVICES", + // "handle": 3 + // } + // }, + // { + // "chain": { + // "family": "ip", + // "table": "kube-proxy", + // "name": "KUBE-NODEPORTS", + // "handle": 4 + // } + // }, + // ... 
+ // ] + // } + // + // parseJSONResult returns the array of objects tagged "nftables". + + jsonResult := map[string][]map[string]map[string]interface{}{} + if err := json.Unmarshal([]byte(listOutput), &jsonResult); err != nil { + return nil, fmt.Errorf("could not parse nft output: %w", err) + } + + nftablesResult := jsonResult["nftables"] + if len(nftablesResult) == 0 { + return nil, fmt.Errorf("could not find result in nft output %q", listOutput) + } + metainfo := nftablesResult[0]["metainfo"] + if metainfo == nil { + return nil, fmt.Errorf("could not find metadata in nft output %q", listOutput) + } + // json_schema_version is an integer but `json.Unmarshal()` will have parsed it as + // a float64 since we didn't tell it otherwise. + if version, ok := jsonVal[float64](metainfo, "json_schema_version"); !ok || version != 1.0 { + return nil, fmt.Errorf("could not find supported json_schema_version in nft output %q", listOutput) + } + return nftablesResult, nil +} + +// getJSONObjects takes the output of "nft -j list", validates it, and returns an array +// of just the objects of objectType. +func getJSONObjects(listOutput, objectType string) ([]map[string]interface{}, error) { + // Given the result from the parseJSONObjects example above, and objectType + // "chain", we would return + // + // [ + // { + // "family": "ip", + // "table": "kube-proxy", + // "name": "KUBE-SERVICES", + // "handle": 3 + // }, + // { + // "family": "ip", + // "table": "kube-proxy", + // "name": "KUBE-NODEPORTS", + // "handle": 4 + // }, + // ... + // ] + + nftablesResult, err := parseJSONObjects(listOutput) + if err != nil { + return nil, err + } + + var objects []map[string]interface{} + for _, objContainer := range nftablesResult { + obj := objContainer[objectType] + if obj != nil { + objects = append(objects, obj) + } + } + return objects, nil +} + +// ListAll is part of Interface. 
+func (nft *realNFTables) ListAll(ctx context.Context) (map[string][]string, error) { + cmd := exec.CommandContext(ctx, nft.path, "--json", "list", "table", string(nft.family), nft.table) + out, err := nft.exec.Run(cmd) + if err != nil { + return nil, fmt.Errorf("failed to run nft: %w", err) + } + + nftablesResult, err := parseJSONObjects(out) + if err != nil { + return nil, err + } + + result := make(map[string][]string) + for i, objContainer := range nftablesResult { + if i == 0 { + // Skip "metainfo" + continue + } + for objectType, obj := range objContainer { + if name, ok := jsonVal[string](obj, "name"); ok { + result[objectType] = append(result[objectType], name) + } + // Shouldn't be more than one field in objContainer, but ignore it + // if there is. + break + } + } + return result, nil +} + +// List is part of Interface. +func (nft *realNFTables) List(ctx context.Context, objectType string) ([]string, error) { + if nft.table == "" { + return nil, fmt.Errorf("can't use List() on a knftables.Interface with no associated family/table") + } + + // objectType is allowed to be either singular or plural. All currently-existing + // nftables object types have plural forms that are just the singular form plus 's', + // and none have singular forms ending in 's'. + if objectType[len(objectType)-1] == 's' { + objectType = objectType[:len(objectType)-1] + } + + // We want to restrict nft to looking only at our table, so we have to do "list table" + // rather than any variant of "list ". 
+ cmd := exec.CommandContext(ctx, nft.path, "--json", "list", "table", string(nft.family), nft.table) + out, err := nft.exec.Run(cmd) + if err != nil { + return nil, fmt.Errorf("failed to run nft: %w", err) + } + + objects, err := getJSONObjects(out, objectType) + if err != nil { + return nil, err + } + + var result []string + for _, obj := range objects { + if name, ok := jsonVal[string](obj, "name"); ok { + result = append(result, name) + } + } + return result, nil +} + +// ListRules is part of Interface +func (nft *realNFTables) ListRules(ctx context.Context, chain string) ([]*Rule, error) { + if nft.table == "" { + return nil, fmt.Errorf("can't use ListRules() on a knftables.Interface with no associated family/table") + } + + var cmd *exec.Cmd + if chain == "" { + cmd = exec.CommandContext(ctx, nft.path, "--json", "list", "table", string(nft.family), nft.table) + } else { + cmd = exec.CommandContext(ctx, nft.path, "--json", "list", "chain", string(nft.family), nft.table, chain) + } + out, err := nft.exec.Run(cmd) + if err != nil { + return nil, fmt.Errorf("failed to run nft: %w", err) + } + + jsonRules, err := getJSONObjects(out, "rule") + if err != nil { + return nil, fmt.Errorf("unable to parse JSON output: %w", err) + } + + rules := make([]*Rule, 0, len(jsonRules)) + for _, jsonRule := range jsonRules { + parentChain, ok := jsonVal[string](jsonRule, "chain") + if !ok { + return nil, fmt.Errorf("unexpected JSON output from nft (rule with no chain)") + } + rule := &Rule{ + Chain: parentChain, + } + + // handle is written as an integer in nft's output, but json.Unmarshal + // will have parsed it as a float64. (Handles are uint64s, but they are + // assigned consecutively starting from 1, so as long as fewer than 2**53 + // nftables objects have been created since boot time, we won't run into + // float64-vs-uint64 precision issues.) 
+ if handle, ok := jsonVal[float64](jsonRule, "handle"); ok { + rule.Handle = PtrTo(int(handle)) + } + if comment, ok := jsonVal[string](jsonRule, "comment"); ok { + rule.Comment = &comment + } + + rules = append(rules, rule) + } + return rules, nil +} + +// ListElements is part of Interface +func (nft *realNFTables) ListElements(ctx context.Context, objectType, name string) ([]*Element, error) { + if nft.table == "" { + return nil, fmt.Errorf("can't use ListElements() on a knftables.Interface with no associated family/table") + } + + cmd := exec.CommandContext(ctx, nft.path, "--json", "list", objectType, string(nft.family), nft.table, name) + out, err := nft.exec.Run(cmd) + if err != nil { + return nil, fmt.Errorf("failed to run nft: %w", err) + } + + jsonSetsOrMaps, err := getJSONObjects(out, objectType) + if err != nil { + return nil, fmt.Errorf("unable to parse JSON output: %w", err) + } + if len(jsonSetsOrMaps) != 1 { + return nil, fmt.Errorf("unexpected JSON output from nft (multiple results)") + } + + jsonElements, _ := jsonVal[[]interface{}](jsonSetsOrMaps[0], "elem") + elements := make([]*Element, 0, len(jsonElements)) + for _, jsonElement := range jsonElements { + var key, value interface{} + + elem := &Element{} + if objectType == "set" { + elem.Set = name + key = jsonElement + } else { + elem.Map = name + tuple, ok := jsonElement.([]interface{}) + if !ok || len(tuple) != 2 { + return nil, fmt.Errorf("unexpected JSON output from nft (elem is not [key,val]: %q)", jsonElement) + } + key, value = tuple[0], tuple[1] + } + + // If the element has a comment or a counter, then key will be a compound + // object like: + // + // { + // "elem": { + // "val": "192.168.0.1", + // "comment": "this is a comment", + // "counter": { "packets": 0, "bytes": 0 } + // } + // } + // + // (Where "val" contains the value that key would have held if there was no + // comment.) 
+ if obj, ok := key.(map[string]interface{}); ok { + if compoundElem, ok := jsonVal[map[string]interface{}](obj, "elem"); ok { + if key, ok = jsonVal[interface{}](compoundElem, "val"); !ok { + return nil, fmt.Errorf("unexpected JSON output from nft (elem with no val: %q)", jsonElement) + } + if comment, ok := jsonVal[string](compoundElem, "comment"); ok { + elem.Comment = &comment + } + } + } + + elem.Key, err = parseElementValue(key) + if err != nil { + return nil, err + } + if value != nil { + elem.Value, err = parseElementValue(value) + if err != nil { + return nil, err + } + } + + elements = append(elements, elem) + } + return elements, nil +} + +// parseElementValue parses a JSON element key/value, handling concatenations, prefixes, and +// converting numeric or "verdict" values to strings. +func parseElementValue(json interface{}) ([]string, error) { + // json can be: + // + // - a single string, e.g. "192.168.1.3" + // + // - a single number, e.g. 80 + // + // - a prefix, expressed as an object: + // { + // "prefix": { + // "addr": "192.168.0.0", + // "len": 16, + // } + // } + // + // - a concatenation, expressed as an object containing an array of simple + // values: + // { + // "concat": [ + // "192.168.1.3", + // "tcp", + // 80 + // ] + // } + // + // - a verdict (for a vmap value), expressed as an object: + // { + // "drop": null + // } + // + // { + // "goto": { + // "target": "destchain" + // } + // } + + switch val := json.(type) { + case string: + return []string{val}, nil + case float64: + return []string{fmt.Sprintf("%d", int(val))}, nil + case map[string]interface{}: + if concat, _ := jsonVal[[]interface{}](val, "concat"); concat != nil { + vals := make([]string, 0, len(concat)) + for i := range concat { + newVals, err := parseElementValue(concat[i]) + if err != nil { + return nil, err + } + vals = append(vals, newVals...) 
+ } + return vals, nil + } else if prefix, _ := jsonVal[map[string]interface{}](val, "prefix"); prefix != nil { + // For prefix-type elements, return the element in CIDR representation. + addr, ok := jsonVal[string](prefix, "addr") + if !ok { + return nil, fmt.Errorf("could not parse 'addr' value as string: %q", prefix) + } + length, ok := jsonVal[float64](prefix, "len") + if !ok { + return nil, fmt.Errorf("could not parse 'len' value as number: %q", prefix) + } + return []string{fmt.Sprintf("%s/%d", addr, int(length))}, nil + } else if len(val) == 1 { + var verdict string + // We just checked that len(val) == 1, so this loop body will only + // run once + for k, v := range val { + if v == nil { + verdict = k + } else if target, ok := v.(map[string]interface{}); ok { + verdict = fmt.Sprintf("%s %s", k, target["target"]) + } + } + return []string{verdict}, nil + } + } + + return nil, fmt.Errorf("could not parse element value %q", json) +} + +// ListCounters is part of Interface +func (nft *realNFTables) ListCounters(ctx context.Context) ([]*Counter, error) { + if nft.table == "" { + return nil, fmt.Errorf("can't use ListCounters() on a knftables.Interface with no associated family/table") + } + + cmd := exec.CommandContext(ctx, nft.path, "--json", "list", "counters", "table", string(nft.family), nft.table) + out, err := nft.exec.Run(cmd) + if err != nil { + return nil, fmt.Errorf("failed to run nft: %w", err) + } + + objects, err := getJSONObjects(out, "counter") + if err != nil { + return nil, err + } + + objectToCounter := func(object map[string]interface{}) *Counter { + counter := &Counter{ + Name: object["name"].(string), + Packets: PtrTo(uint64(object["packets"].(float64))), + Bytes: PtrTo(uint64(object["bytes"].(float64))), + } + if handle, ok := jsonVal[string](object, "comment"); ok { + counter.Comment = PtrTo(handle) + } + if handle, ok := jsonVal[float64](object, "handle"); ok { + counter.Handle = PtrTo(int(handle)) + } + + return counter + } + + counters 
:= make([]*Counter, 0, len(objects)) + for _, object := range objects { + counters = append(counters, objectToCounter(object)) + } + return counters, nil +} diff --git a/vendor/sigs.k8s.io/knftables/objects.go b/vendor/sigs.k8s.io/knftables/objects.go new file mode 100644 index 0000000000..eb6e55e6f2 --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/objects.go @@ -0,0 +1,856 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package knftables + +import ( + "fmt" + "io" + "regexp" + "strconv" + "strings" + "time" +) + +func parseInt(numbersOnly string) *int { + i64, _ := strconv.ParseInt(numbersOnly, 10, 64) + i := int(i64) + return &i +} + +func parseUint(numbersOnly string) *uint64 { + ui64, _ := strconv.ParseUint(numbersOnly, 10, 64) + return &ui64 +} + +// getComment parses a match for the commentGroup regexp (below). To distinguish between empty comment and no comment, +// we capture comment with double quotes. 
+func getComment(commentGroup string) *string { + if commentGroup == "" { + return nil + } + noQuotes := strings.Trim(commentGroup, "\"") + return &noQuotes +} + +func getTable(ctx *nftContext, family Family, table string) (Family, string, error) { + switch { + case ctx.family == "" && family == "": + return "", "", fmt.Errorf("must specify family and table for each object when the Interface has no default") + case ctx.family != "" && family != "" && family != ctx.family: + return "", "", fmt.Errorf("cannot override family or table when the Interface has a default") + case ctx.family != "" && family == "": + family = ctx.family + } + + switch { + case ctx.table == "" && table == "": + return "", "", fmt.Errorf("must specify family and table for each object when the Interface has no default") + case ctx.table != "" && table != "" && table != ctx.table: + return "", "", fmt.Errorf("cannot override family or table when the Interface has a default") + case ctx.table != "" && table == "": + table = ctx.table + } + + return family, table, nil +} + +var commentGroup = `(".*")` +var noSpaceGroup = `([^ ]*)` +var numberGroup = `([0-9]*)` + +// Object implementation for Table +func (table *Table) validate(verb verb, ctx *nftContext) error { + if _, _, err := getTable(ctx, table.Family, table.Name); err != nil { + return err + } + switch verb { + case addVerb, createVerb, flushVerb: + if table.Handle != nil { + return fmt.Errorf("cannot specify Handle in %s operation", verb) + } + case deleteVerb, destroyVerb: + // Handle can be nil or non-nil + default: + return fmt.Errorf("%s is not implemented for tables", verb) + } + + return nil +} + +func (table *Table) writeOperation(verb verb, ctx *nftContext, writer io.Writer) { + family, tableName, _ := getTable(ctx, table.Family, table.Name) + + // Special case for delete-by-handle + if (verb == deleteVerb || verb == destroyVerb) && table.Handle != nil { + fmt.Fprintf(writer, "%s table %s handle %d", verb, family, *table.Handle) + 
return + } + + // All other cases refer to the table by name + fmt.Fprintf(writer, "%s table %s %s", verb, family, tableName) + if verb == addVerb || verb == createVerb { + hasComment := table.Comment != nil && !ctx.noObjectComments + if hasComment || len(table.Flags) != 0 { + fmt.Fprintf(writer, " {") + if hasComment { + fmt.Fprintf(writer, " comment %q ;", *table.Comment) + } + if len(table.Flags) != 0 { + fmt.Fprintf(writer, " flags ") + for i := range table.Flags { + if i > 0 { + fmt.Fprintf(writer, ",") + } + fmt.Fprintf(writer, "%s", table.Flags[i]) + } + fmt.Fprintf(writer, " ;") + } + fmt.Fprintf(writer, " }") + } + } + fmt.Fprintf(writer, "\n") +} + +var tableRegexp = regexp.MustCompile(fmt.Sprintf( + `(?:{ (?:comment %s ; )?(?:flags %s ; )?})?`, commentGroup, noSpaceGroup)) + +func parseTableFlags(s string) []TableFlag { + var res []TableFlag + for _, flag := range strings.Split(s, ",") { + res = append(res, TableFlag(flag)) + } + return res +} + +func (table *Table) parse(family Family, tableName, line string) error { + match := tableRegexp.FindStringSubmatch(line) + if match == nil { + return fmt.Errorf("failed parsing table add command") + } + table.Family = family + table.Name = tableName + table.Comment = getComment(match[1]) + if match[2] != "" { + table.Flags = parseTableFlags(match[2]) + } + return nil +} + +// Object implementation for Chain +func (chain *Chain) validate(verb verb, ctx *nftContext) error { + if _, _, err := getTable(ctx, chain.Family, chain.Table); err != nil { + return err + } + if chain.Hook == nil { + if chain.Type != nil || chain.Priority != nil { + return fmt.Errorf("regular chain %q must not specify Type or Priority", chain.Name) + } + if chain.Policy != nil { + return fmt.Errorf("regular chain %q must not specify Policy", chain.Name) + } + if chain.Device != nil { + return fmt.Errorf("regular chain %q must not specify Device", chain.Name) + } + } else { + if chain.Type == nil || chain.Priority == nil { + return 
fmt.Errorf("base chain %q must specify Type and Priority", chain.Name) + } + } + + switch verb { + case addVerb, createVerb, flushVerb: + if chain.Name == "" { + return fmt.Errorf("no name specified for chain") + } + if chain.Handle != nil { + return fmt.Errorf("cannot specify Handle in %s operation", verb) + } + case deleteVerb, destroyVerb: + if chain.Name == "" && chain.Handle == nil { + return fmt.Errorf("must specify either name or handle") + } + default: + return fmt.Errorf("%s is not implemented for chains", verb) + } + + return nil +} + +func (chain *Chain) writeOperation(verb verb, ctx *nftContext, writer io.Writer) { + family, table, _ := getTable(ctx, chain.Family, chain.Table) + + // Special case for delete-by-handle + if (verb == deleteVerb || verb == destroyVerb) && chain.Handle != nil { + fmt.Fprintf(writer, "%s chain %s %s handle %d", verb, family, table, *chain.Handle) + return + } + + fmt.Fprintf(writer, "%s chain %s %s %s", verb, family, table, chain.Name) + if verb == addVerb || verb == createVerb { + if chain.Type != nil || (chain.Comment != nil && !ctx.noObjectComments) { + fmt.Fprintf(writer, " {") + + if chain.Type != nil { + fmt.Fprintf(writer, " type %s hook %s", *chain.Type, *chain.Hook) + if chain.Device != nil { + fmt.Fprintf(writer, " device %q", *chain.Device) + } + + // Parse the priority to a number if we can, because older + // versions of nft don't accept certain named priorities + // in all contexts (eg, "dstnat" priority in the "output" + // hook). 
+ if priority, err := ParsePriority(family, string(*chain.Priority)); err == nil { + fmt.Fprintf(writer, " priority %d ;", priority) + } else { + fmt.Fprintf(writer, " priority %s ;", *chain.Priority) + } + if chain.Policy != nil { + fmt.Fprintf(writer, " policy %s ;", *chain.Policy) + } + } + if chain.Comment != nil && !ctx.noObjectComments { + fmt.Fprintf(writer, " comment %q ;", *chain.Comment) + } + + fmt.Fprintf(writer, " }") + } + } + + fmt.Fprintf(writer, "\n") +} + +// groups in []: [1]%s(?: {(?: type [2]%s hook [3]%s(?: device "[4]%s")(?: priority [5]%s ;)(?: policy [6]%s ;)?)(?: comment [7]%s ;) }) +var chainRegexp = regexp.MustCompile(fmt.Sprintf( + `%s(?: {(?: type %s hook %s(?: device "%s")?(?: priority %s ;)(?: policy %s ;)?)?(?: comment %s ;)? })?`, + noSpaceGroup, noSpaceGroup, noSpaceGroup, noSpaceGroup, noSpaceGroup, noSpaceGroup, commentGroup)) + +func (chain *Chain) parse(family Family, table, line string) error { + match := chainRegexp.FindStringSubmatch(line) + if match == nil { + return fmt.Errorf("failed parsing chain add command") + } + chain.Family = family + chain.Table = table + chain.Name = match[1] + chain.Comment = getComment(match[7]) + if match[2] != "" { + chain.Type = (*BaseChainType)(&match[2]) + } + if match[3] != "" { + chain.Hook = (*BaseChainHook)(&match[3]) + } + if match[4] != "" { + chain.Device = &match[4] + } + if match[5] != "" { + chain.Priority = (*BaseChainPriority)(&match[5]) + } + if match[6] != "" { + chain.Policy = (*BaseChainPolicy)(&match[6]) + } + return nil +} + +// Object implementation for Rule +func (rule *Rule) validate(verb verb, ctx *nftContext) error { + if _, _, err := getTable(ctx, rule.Family, rule.Table); err != nil { + return err + } + if rule.Chain == "" { + return fmt.Errorf("no chain name specified for rule") + } + + if rule.Index != nil && rule.Handle != nil { + return fmt.Errorf("cannot specify both Index and Handle") + } + + switch verb { + case addVerb, insertVerb: + if rule.Rule == "" { + 
return fmt.Errorf("no rule specified") + } + case replaceVerb: + if rule.Rule == "" { + return fmt.Errorf("no rule specified") + } + if rule.Handle == nil { + return fmt.Errorf("must specify Handle with %s", verb) + } + case deleteVerb, destroyVerb: + if rule.Handle == nil { + return fmt.Errorf("must specify Handle with %s", verb) + } + default: + return fmt.Errorf("%s is not implemented for rules", verb) + } + + return nil +} + +func (rule *Rule) writeOperation(verb verb, ctx *nftContext, writer io.Writer) { + family, table, _ := getTable(ctx, rule.Family, rule.Table) + + fmt.Fprintf(writer, "%s rule %s %s %s", verb, family, table, rule.Chain) + if rule.Index != nil { + fmt.Fprintf(writer, " index %d", *rule.Index) + } else if rule.Handle != nil { + fmt.Fprintf(writer, " handle %d", *rule.Handle) + } + + switch verb { + case addVerb, insertVerb, replaceVerb: + fmt.Fprintf(writer, " %s", rule.Rule) + + if rule.Comment != nil { + fmt.Fprintf(writer, " comment %q", *rule.Comment) + } + } + + fmt.Fprintf(writer, "\n") +} + +// groups in []: [1]%s(?: index [2]%s)?(?: handle [3]%s)? [4]([^"]*)(?: comment [5]%s)?$ +var ruleRegexp = regexp.MustCompile(fmt.Sprintf( + `%s(?: index %s)?(?: handle %s)? 
([^"]*)(?: comment %s)?$`, + noSpaceGroup, numberGroup, numberGroup, commentGroup)) + +func (rule *Rule) parse(family Family, table, line string) error { + match := ruleRegexp.FindStringSubmatch(line) + if match == nil { + return fmt.Errorf("failed parsing rule add command") + } + rule.Family = family + rule.Table = table + rule.Chain = match[1] + rule.Rule = match[4] + rule.Comment = getComment(match[5]) + if match[2] != "" { + rule.Index = parseInt(match[2]) + } + if match[3] != "" { + rule.Handle = parseInt(match[3]) + } + return nil +} + +// Object implementation for Set +func (set *Set) validate(verb verb, ctx *nftContext) error { + if _, _, err := getTable(ctx, set.Family, set.Table); err != nil { + return err + } + switch verb { + case addVerb, createVerb: + if set.Name == "" { + return fmt.Errorf("no name specified for set") + } + if (set.Type == "" && set.TypeOf == "") || (set.Type != "" && set.TypeOf != "") { + return fmt.Errorf("set must specify either Type or TypeOf") + } + if set.Handle != nil { + return fmt.Errorf("cannot specify Handle in %s operation", verb) + } + case flushVerb: + if set.Name == "" { + return fmt.Errorf("no name specified for set") + } + case deleteVerb, destroyVerb: + if set.Name == "" && set.Handle == nil { + return fmt.Errorf("must specify either name or handle") + } + default: + return fmt.Errorf("%s is not implemented for sets", verb) + } + + return nil +} + +func (set *Set) writeOperation(verb verb, ctx *nftContext, writer io.Writer) { + family, table, _ := getTable(ctx, set.Family, set.Table) + + // Special case for delete-by-handle + if (verb == deleteVerb || verb == destroyVerb) && set.Handle != nil { + fmt.Fprintf(writer, "%v set %s %s handle %d", verb, family, table, *set.Handle) + return + } + + fmt.Fprintf(writer, "%s set %s %s %s", verb, family, table, set.Name) + if verb == addVerb || verb == createVerb { + fmt.Fprintf(writer, " {") + + if set.Type != "" { + fmt.Fprintf(writer, " type %s ;", set.Type) + } else { + 
fmt.Fprintf(writer, " typeof %s ;", set.TypeOf) + } + + if len(set.Flags) != 0 { + fmt.Fprintf(writer, " flags ") + for i := range set.Flags { + if i > 0 { + fmt.Fprintf(writer, ",") + } + fmt.Fprintf(writer, "%s", set.Flags[i]) + } + fmt.Fprintf(writer, " ;") + } + + if set.Timeout != nil { + fmt.Fprintf(writer, " timeout %ds ;", int64(set.Timeout.Seconds())) + } + if set.GCInterval != nil { + fmt.Fprintf(writer, " gc-interval %ds ;", int64(set.GCInterval.Seconds())) + } + if set.Size != nil { + fmt.Fprintf(writer, " size %d ;", *set.Size) + } + if set.Policy != nil { + fmt.Fprintf(writer, " policy %s ;", *set.Policy) + } + if set.AutoMerge != nil && *set.AutoMerge { + fmt.Fprintf(writer, " auto-merge ;") + } + + if set.Comment != nil && !ctx.noObjectComments { + fmt.Fprintf(writer, " comment %q ;", *set.Comment) + } + + fmt.Fprintf(writer, " }") + } + + fmt.Fprintf(writer, "\n") +} + +func (set *Set) parse(family Family, table, line string) error { + match := setRegexp.FindStringSubmatch(line) + if match == nil { + return fmt.Errorf("failed parsing set add command") + } + set.Family = family + set.Table = table + set.Name, set.Type, set.TypeOf, set.Flags, set.Timeout, set.GCInterval, + set.Size, set.Policy, set.Comment, set.AutoMerge = parseMapAndSetProps(match) + return nil +} + +// Object implementation for Map +func (mapObj *Map) validate(verb verb, ctx *nftContext) error { + if _, _, err := getTable(ctx, mapObj.Family, mapObj.Table); err != nil { + return err + } + switch verb { + case addVerb, createVerb: + if mapObj.Name == "" { + return fmt.Errorf("no name specified for map") + } + if (mapObj.Type == "" && mapObj.TypeOf == "") || (mapObj.Type != "" && mapObj.TypeOf != "") { + return fmt.Errorf("map must specify either Type or TypeOf") + } + if mapObj.Handle != nil { + return fmt.Errorf("cannot specify Handle in %s operation", verb) + } + case flushVerb: + if mapObj.Name == "" { + return fmt.Errorf("no name specified for map") + } + case deleteVerb, 
destroyVerb:
+		if mapObj.Name == "" && mapObj.Handle == nil {
+			return fmt.Errorf("must specify either name or handle")
+		}
+	default:
+		return fmt.Errorf("%s is not implemented for maps", verb)
+	}
+
+	return nil
+}
+
+func (mapObj *Map) writeOperation(verb verb, ctx *nftContext, writer io.Writer) {
+	family, table, _ := getTable(ctx, mapObj.Family, mapObj.Table)
+
+	// Special case for delete-by-handle
+	if (verb == deleteVerb || verb == destroyVerb) && mapObj.Handle != nil {
+		fmt.Fprintf(writer, "%s map %s %s handle %d", verb, family, table, *mapObj.Handle)
+		return
+	}
+
+	fmt.Fprintf(writer, "%s map %s %s %s", verb, family, table, mapObj.Name)
+	if verb == addVerb || verb == createVerb {
+		fmt.Fprintf(writer, " {")
+
+		if mapObj.Type != "" {
+			fmt.Fprintf(writer, " type %s ;", mapObj.Type)
+		} else {
+			fmt.Fprintf(writer, " typeof %s ;", mapObj.TypeOf)
+		}
+
+		if len(mapObj.Flags) != 0 {
+			fmt.Fprintf(writer, " flags ")
+			for i := range mapObj.Flags {
+				if i > 0 {
+					fmt.Fprintf(writer, ",")
+				}
+				fmt.Fprintf(writer, "%s", mapObj.Flags[i])
+			}
+			fmt.Fprintf(writer, " ;")
+		}
+
+		if mapObj.Timeout != nil {
+			fmt.Fprintf(writer, " timeout %ds ;", int64(mapObj.Timeout.Seconds()))
+		}
+		if mapObj.GCInterval != nil {
+			fmt.Fprintf(writer, " gc-interval %ds ;", int64(mapObj.GCInterval.Seconds()))
+		}
+		if mapObj.Size != nil {
+			fmt.Fprintf(writer, " size %d ;", *mapObj.Size)
+		}
+		if mapObj.Policy != nil {
+			fmt.Fprintf(writer, " policy %s ;", *mapObj.Policy)
+		}
+
+		if mapObj.Comment != nil && !ctx.noObjectComments {
+			fmt.Fprintf(writer, " comment %q ;", *mapObj.Comment)
+		}
+
+		fmt.Fprintf(writer, " }")
+	}
+
+	fmt.Fprintf(writer, "\n")
+}
+
+func (mapObj *Map) parse(family Family, table, line string) error {
+	match := mapRegexp.FindStringSubmatch(line)
+	if match == nil {
+		return fmt.Errorf("failed parsing map add command")
+	}
+	mapObj.Family = family
+	mapObj.Table = table
+	mapObj.Name, mapObj.Type, mapObj.TypeOf, mapObj.Flags, mapObj.Timeout,
mapObj.GCInterval, + mapObj.Size, mapObj.Policy, mapObj.Comment, _ = parseMapAndSetProps(match) + return nil +} + +var autoMergeProp = `( auto-merge ;)?` + +// groups in []: [1]%s {(?: [2](type|typeof) [3]([^;]*)) ;(?: flags [4]([^;]*) ;)?(?: timeout [5]%ss ;)?(?: gc-interval [6]%ss ;)?(?: size [7]%s ;)?(?: policy [8]%s ;)?[9]%s(?: comment [10]%s ;)? } +var mapOrSet = `%s {(?: (type|typeof) ([^;]*)) ;(?: flags ([^;]*) ;)?(?: timeout %ss ;)?(?: gc-interval %ss ;)?(?: size %s ;)?(?: policy %s ;)?%s(?: comment %s ;)? }` +var mapRegexp = regexp.MustCompile(fmt.Sprintf(mapOrSet, noSpaceGroup, numberGroup, numberGroup, noSpaceGroup, noSpaceGroup, "", commentGroup)) +var setRegexp = regexp.MustCompile(fmt.Sprintf(mapOrSet, noSpaceGroup, numberGroup, numberGroup, noSpaceGroup, noSpaceGroup, autoMergeProp, commentGroup)) + +func parseMapAndSetProps(match []string) (name string, typeProp string, typeOf string, flags []SetFlag, + timeout *time.Duration, gcInterval *time.Duration, size *uint64, policy *SetPolicy, comment *string, autoMerge *bool) { + name = match[1] + // set and map have different number of match groups, but comment is always the last + comment = getComment(match[len(match)-1]) + if match[2] == "type" { + typeProp = match[3] + } else { + typeOf = match[3] + } + if match[4] != "" { + flags = parseSetFlags(match[4]) + } + if match[5] != "" { + timeoutObj, _ := time.ParseDuration(match[5] + "s") + timeout = &timeoutObj + } + if match[6] != "" { + gcIntervalObj, _ := time.ParseDuration(match[6] + "s") + gcInterval = &gcIntervalObj + } + if match[7] != "" { + size = parseUint(match[7]) + } + if match[8] != "" { + policy = (*SetPolicy)(&match[8]) + } + if len(match) > 10 { + // set + if match[9] != "" { + autoMergeObj := true + autoMerge = &autoMergeObj + } + } + return +} + +func parseSetFlags(s string) []SetFlag { + var res []SetFlag + for _, flag := range strings.Split(s, ",") { + res = append(res, SetFlag(flag)) + } + return res +} + +// Object implementation 
for Element
+func (element *Element) validate(verb verb, ctx *nftContext) error {
+	if _, _, err := getTable(ctx, element.Family, element.Table); err != nil {
+		return err
+	}
+	if element.Map == "" && element.Set == "" {
+		return fmt.Errorf("no set/map name specified for element")
+	} else if element.Set != "" && element.Map != "" {
+		return fmt.Errorf("element specifies both a set name and a map name")
+	}
+
+	if len(element.Key) == 0 {
+		return fmt.Errorf("no key specified for element")
+	}
+	if element.Set != "" && len(element.Value) != 0 {
+		return fmt.Errorf("map value specified for set element")
+	}
+
+	switch verb {
+	case addVerb, createVerb:
+		if element.Map != "" && len(element.Value) == 0 {
+			return fmt.Errorf("no map value specified for map element")
+		}
+	case deleteVerb, destroyVerb: // deletion needs only the key; Value is optional here
+	default:
+		return fmt.Errorf("%s is not implemented for elements", verb)
+	}
+
+	return nil
+}
+
+func (element *Element) writeOperation(verb verb, ctx *nftContext, writer io.Writer) {
+	family, table, _ := getTable(ctx, element.Family, element.Table)
+
+	name := element.Set
+	if name == "" {
+		name = element.Map
+	}
+
+	fmt.Fprintf(writer, "%s element %s %s %s { %s", verb, family, table, name,
+		strings.Join(element.Key, " . "))
+
+	if verb == addVerb || verb == createVerb {
+		if element.Comment != nil {
+			fmt.Fprintf(writer, " comment %q", *element.Comment)
+		}
+
+		if len(element.Value) != 0 {
+			fmt.Fprintf(writer, " : %s", strings.Join(element.Value, " . "))
+		}
+	}
+
+	fmt.Fprintf(writer, " }\n")
+}
+
+// groups in []: [1]%s { [2]([^"]*)(?: comment [3]%s)? : [4](.*) }
+var mapElementRegexp = regexp.MustCompile(fmt.Sprintf(
+	`%s { ([^"]*)(?: comment %s)? : (.*) }`, noSpaceGroup, commentGroup))
+
+// groups in []: [1]%s { [2]([^"]*)(?: comment [3]%s)? }
+var setElementRegexp = regexp.MustCompile(fmt.Sprintf(
+	`%s { ([^"]*)(?: comment %s)? 
}`, noSpaceGroup, commentGroup))
+
+func (element *Element) parse(family Family, table, line string) error {
+	// try to match map element first, since it has more groups, and if it matches, then we can be sure
+	// this is map element.
+	match := mapElementRegexp.FindStringSubmatch(line)
+	if match == nil {
+		match = setElementRegexp.FindStringSubmatch(line)
+		if match == nil {
+			return fmt.Errorf("failed parsing element add command")
+		}
+	}
+	element.Family = family
+	element.Table = table
+	element.Comment = getComment(match[3])
+	mapOrSetName := match[1]
+	element.Key = append(element.Key, strings.Split(match[2], " . ")...)
+	if len(match) == 5 {
+		// map regex matched
+		element.Map = mapOrSetName
+		element.Value = append(element.Value, strings.Split(match[4], " . ")...)
+	} else {
+		element.Set = mapOrSetName
+	}
+	return nil
+}
+
+// Object implementation for Flowtable
+func (flowtable *Flowtable) validate(verb verb, ctx *nftContext) error {
+	if _, _, err := getTable(ctx, flowtable.Family, flowtable.Table); err != nil {
+		return err
+	}
+	switch verb {
+	case addVerb, createVerb:
+		if flowtable.Name == "" {
+			return fmt.Errorf("no name specified for flowtable")
+		}
+		if flowtable.Handle != nil {
+			return fmt.Errorf("cannot specify Handle in %s operation", verb)
+		}
+	case deleteVerb, destroyVerb:
+		if flowtable.Name == "" && flowtable.Handle == nil {
+			return fmt.Errorf("must specify either name or handle")
+		}
+	default:
+		return fmt.Errorf("%s is not implemented for flowtables", verb)
+	}
+
+	return nil
+}
+
+func (flowtable *Flowtable) writeOperation(verb verb, ctx *nftContext, writer io.Writer) {
+	family, table, _ := getTable(ctx, flowtable.Family, flowtable.Table)
+
+	// Special case for delete-by-handle; use verb (not a hardcoded "delete") so
+	// that destroy keeps its don't-fail-if-absent semantics, like the other types.
+	if (verb == deleteVerb || verb == destroyVerb) && flowtable.Handle != nil {
+		fmt.Fprintf(writer, "%s flowtable %s %s handle %d", verb, family, table, *flowtable.Handle)
+		return
+	}
+
+	fmt.Fprintf(writer, "%s flowtable %s %s %s", verb, family,
table, flowtable.Name) + if verb == addVerb || verb == createVerb { + fmt.Fprintf(writer, " {") + + if flowtable.Priority != nil { + // since there is only one priority value allowed "filter" just use the value + // provided and not try to parse it. + fmt.Fprintf(writer, " hook ingress priority %s ;", *flowtable.Priority) + } + + if len(flowtable.Devices) > 0 { + fmt.Fprintf(writer, " devices = { %s } ;", strings.Join(flowtable.Devices, ", ")) + } + + fmt.Fprintf(writer, " }") + } + + fmt.Fprintf(writer, "\n") +} + +// nft add flowtable inet example_table example_flowtable { hook ingress priority filter ; devices = { eth0 }; } +var flowtableRegexp = regexp.MustCompile(fmt.Sprintf( + `%s(?: {(?: hook ingress priority %s ;)(?: devices = {(.*)} ;) })?`, + noSpaceGroup, noSpaceGroup)) + +func (flowtable *Flowtable) parse(family Family, table, line string) error { + match := flowtableRegexp.FindStringSubmatch(line) + if match == nil { + return fmt.Errorf("failed parsing flowtableRegexp add command") + } + flowtable.Family = family + flowtable.Table = table + flowtable.Name = match[1] + if match[2] != "" { + flowtable.Priority = (*FlowtableIngressPriority)(&match[2]) + } + // to avoid complex regular expressions the regex match everything between the brackets + // to match a single interface or a comma separated list of interfaces, and it is postprocessed + // here to remove the whitespaces. + if match[3] != "" { + devices := strings.Split(strings.TrimSpace(match[3]), ",") + for i := range devices { + devices[i] = strings.TrimSpace(devices[i]) + } + if len(devices) > 0 { + flowtable.Devices = devices + } + } + return nil +} + +// nft add counter [family] table name [{ [ packets packets bytes bytes ; ] [ comment comment ; }] +// ([^ ]*)(?: {(?: packets ([0-9]*) bytes ([0-9]*) ;)?(?: comment (".*") ;)? })? +var counterRegexp = regexp.MustCompile(fmt.Sprintf( + `%s(?: {(?: packets %s bytes %s ;)?(?: comment %s ;)? 
})?`, + noSpaceGroup, numberGroup, numberGroup, commentGroup)) + +func (counter *Counter) parse(family Family, table, line string) error { + match := counterRegexp.FindStringSubmatch(line) + if match == nil { + return fmt.Errorf("failed parsing table add command") + } + counter.Family = family + counter.Table = table + counter.Name = match[1] + if match[2] != "" { + counter.Packets = PtrTo(uint64(*parseInt(match[2]))) + } + if match[3] != "" { + counter.Bytes = PtrTo(uint64(*parseInt(match[3]))) + } + if match[4] != "" { + counter.Comment = getComment(match[4]) + } + return nil +} + +// Object implementation for Counter +func (counter *Counter) validate(verb verb, ctx *nftContext) error { + if _, _, err := getTable(ctx, counter.Family, counter.Table); err != nil { + return err + } + switch verb { + case addVerb, createVerb: + if counter.Name == "" { + return fmt.Errorf("no counter name specified") + } + if counter.Handle != nil { + return fmt.Errorf("cannot specify Handle in %s operation", verb) + } + if counter.Packets != nil && counter.Bytes == nil { + return fmt.Errorf("cannot specify Packets without Bytes in %s operation", verb) + } + if counter.Packets == nil && counter.Bytes != nil { + return fmt.Errorf("cannot specify Bytes without Packets in %s operation", verb) + } + case deleteVerb, destroyVerb: + if counter.Name == "" && counter.Handle == nil { + return fmt.Errorf("neither counter name nor handle specified") + } + case resetVerb: + if counter.Name == "" { + return fmt.Errorf("no counter name specified") + } + default: + return fmt.Errorf("%s is not implemented for counters", verb) + } + return nil +} + +func (counter *Counter) writeOperation(verb verb, ctx *nftContext, writer io.Writer) { + family, table, _ := getTable(ctx, counter.Family, counter.Table) + + // Special case for delete-by-handle + if (verb == deleteVerb || verb == destroyVerb) && counter.Handle != nil { + fmt.Fprintf(writer, "%s counter %s %s handle %d", verb, family, table, 
*counter.Handle) + return + } + + fmt.Fprintf(writer, "%s counter %s %s ", verb, family, table) + switch verb { + case addVerb, createVerb: + fmt.Fprint(writer, counter.Name) + if counter.Comment != nil || counter.Packets != nil || counter.Bytes != nil { + fmt.Fprintf(writer, " {") + if counter.Packets != nil && counter.Bytes != nil { + fmt.Fprintf(writer, " packets %d bytes %d ;", *counter.Packets, *counter.Bytes) + } + if counter.Comment != nil && (verb == addVerb || verb == createVerb) { + fmt.Fprintf(writer, " comment %q ;", *counter.Comment) + } + fmt.Fprintf(writer, " }") + } + default: + fmt.Fprint(writer, counter.Name) + } + fmt.Fprintf(writer, "\n") +} diff --git a/vendor/sigs.k8s.io/knftables/transaction.go b/vendor/sigs.k8s.io/knftables/transaction.go new file mode 100644 index 0000000000..7caf75280e --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/transaction.go @@ -0,0 +1,188 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package knftables + +import ( + "bytes" + "fmt" +) + +// Transaction represents an nftables transaction +type Transaction struct { + *nftContext + + operations []operation + err error +} + +// operation contains a single nftables operation (eg "add table", "flush chain") +type operation struct { + verb verb + obj Object +} + +// verb is used internally to represent the different "nft" verbs +type verb string + +const ( + addVerb verb = "add" + createVerb verb = "create" + insertVerb verb = "insert" + replaceVerb verb = "replace" + deleteVerb verb = "delete" + destroyVerb verb = "destroy" + flushVerb verb = "flush" + resetVerb verb = "reset" +) + +// populateCommandBuf populates the transaction as series of nft commands to the given bytes.Buffer. +func (tx *Transaction) populateCommandBuf(buf *bytes.Buffer) { + for _, op := range tx.operations { + op.obj.writeOperation(op.verb, tx.nftContext, buf) + } +} + +// String returns the transaction as a string containing the nft commands; if there is +// a pending error, it will be output as a comment at the end of the transaction. +func (tx *Transaction) String() string { + buf := &bytes.Buffer{} + tx.populateCommandBuf(buf) + if tx.err != nil { + fmt.Fprintf(buf, "# ERROR: %v", tx.err) + } + + return buf.String() +} + +// NumOperations returns the number of operations queued in the transaction. +func (tx *Transaction) NumOperations() int { + return len(tx.operations) +} + +func (tx *Transaction) operation(verb verb, obj Object) { + if tx.err != nil { + return + } + if tx.err = obj.validate(verb, tx.nftContext); tx.err != nil { + return + } + + tx.operations = append(tx.operations, operation{verb: verb, obj: obj}) +} + +// Add adds an "nft add" operation to tx, ensuring that obj exists by creating it if it +// did not already exist. (If obj is a Rule, it will be appended to the end of its chain, +// or else added after the Rule indicated by this rule's Index or Handle.) 
The Add() call +// always succeeds, but if obj is invalid, or inconsistent with the existing nftables +// state, then an error will be returned when the transaction is Run. +func (tx *Transaction) Add(obj Object) { + tx.operation(addVerb, obj) +} + +// Create adds an "nft create" operation to tx, creating obj, which must not already +// exist. (If obj is a Rule, it will be appended to the end of its chain, or else added +// after the Rule indicated by this rule's Index or Handle.) The Create() call always +// succeeds, but if obj is invalid, already exists, or is inconsistent with the existing +// nftables state, then an error will be returned when the transaction is Run. +func (tx *Transaction) Create(obj Object) { + tx.operation(createVerb, obj) +} + +// Insert adds an "nft insert" operation to tx, inserting obj (which must be a Rule) at +// the start of its chain, or before the other Rule indicated by this rule's Index or +// Handle. The Insert() call always succeeds, but if obj is invalid or is inconsistent +// with the existing nftables state, then an error will be returned when the transaction +// is Run. +func (tx *Transaction) Insert(obj Object) { + tx.operation(insertVerb, obj) +} + +// Replace adds an "nft replace" operation to tx, replacing an existing rule with obj +// (which must be a Rule). The Replace() call always succeeds, but if obj is invalid, does +// not contain the Handle of an existing rule, or is inconsistent with the existing +// nftables state, then an error will be returned when the transaction is Run. +func (tx *Transaction) Replace(obj Object) { + tx.operation(replaceVerb, obj) +} + +// Flush adds an "nft flush" operation to tx, clearing the contents of obj. The Flush() +// call always succeeds, but if obj does not exist (or does not support flushing) then an +// error will be returned when the transaction is Run. 
+func (tx *Transaction) Flush(obj Object) { + tx.operation(flushVerb, obj) +} + +// Delete adds an "nft delete" operation to tx, deleting obj, which must exist. The +// Delete() call always succeeds, but if obj does not exist or cannot be deleted based on +// the information provided (eg, Handle is required but not set) then an error will be +// returned when the transaction is Run. +func (tx *Transaction) Delete(obj Object) { + tx.operation(deleteVerb, obj) +} + +// Reset adds a "nft reset" operation to tx, resetting obj (which must be a Counter). +// The Reset() call always succeeds, but if obj does not exist then an error will be +// returned when the transaction is Run. +func (tx *Transaction) Reset(obj Object) { + tx.operation(resetVerb, obj) +} + +// Destroy adds an "nft destroy" operation to tx, ensuring that obj does not exist, by +// deleting it if it does exist. The Destroy() call always succeeds, but if obj cannot be +// deleted based on the information provided (eg, Handle is required but not set) then an +// error will be returned when the transaction is Run. +// +// Support for the actual "nft destroy" command requires kernel 6.3+ and nft 1.0.7+. You +// can create the Interface with the `RequireDestroy` option if you want construction to +// fail on older hosts. Alternatively, you can create the interface with the +// `EmulateDestroy` option, in which case knftables will emulate Destroy by doing an +// Add+Delete. In that case, obj must be valid for both an Add and a Delete. (Even if the +// system you are on supports destroy, you may only call Destroy() in a +// backward-compatible way if you are using `EmulateDestroy`.) In particular, this means: +// +// - You can only Destroy() objects by Name or Key, not by Handle. +// - You can't Destroy() a Rule (since they can only be deleted by Handle). +// - You do not need to include optional values in obj (e.g. base chain properties) but +// if you do include them, they need to be correct. 
+// - When Destroy()ing a Set or Map you must include the correct Type. +// - When Destroy()ing a Map Element you must include the correct Value. +func (tx *Transaction) Destroy(obj Object) { + if tx.err != nil { + return + } + if tx.err = obj.validate(destroyVerb, tx.nftContext); tx.err != nil { + return + } + + if tx.emulateDestroy { + err := obj.validate(addVerb, tx.nftContext) + if err == nil { + err = obj.validate(deleteVerb, tx.nftContext) + } + if err != nil { + tx.err = fmt.Errorf("object is not compatible with EmulateDestroy: %w", err) + return + } + } + + if tx.emulateDestroy && !tx.nftContext.hasDestroy { + tx.operations = append(tx.operations, operation{verb: addVerb, obj: obj}) + tx.operations = append(tx.operations, operation{verb: deleteVerb, obj: obj}) + } else { + tx.operations = append(tx.operations, operation{verb: destroyVerb, obj: obj}) + } +} diff --git a/vendor/sigs.k8s.io/knftables/types.go b/vendor/sigs.k8s.io/knftables/types.go new file mode 100644 index 0000000000..3a34370287 --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/types.go @@ -0,0 +1,532 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package knftables + +import ( + "io" + "time" +) + +const ( + // Maximum length of a table, chain, set, etc, name + NameLengthMax = 256 + + // Maximum length of a comment + CommentLengthMax = 128 +) + +// Object is the interface for an nftables object. All of the concrete object types +// implement this interface. 
+type Object interface { + // validate validates an object for an operation + validate(verb verb, ctx *nftContext) error + + // writeOperation writes out an "nft" operation involving the object. It assumes + // that the object has been validated. + writeOperation(verb verb, ctx *nftContext, writer io.Writer) + + // parse is the opposite of writeOperation; it fills Object fields based on an "nft add" + // command. line is the part of the line after "nft add " + // (so for most types it starts with the object name). + // If error is returned, Object's fields may be partially filled, therefore Object should not be used. + parse(family Family, table, line string) error +} + +// Family is an nftables family +type Family string + +const ( + // IPv4Family represents the "ip" nftables family, for IPv4 rules. + IPv4Family Family = "ip" + + // IPv6Family represents the "ip6" nftables family, for IPv6 rules. + IPv6Family Family = "ip6" + + // InetFamily represents the "inet" nftables family, for mixed IPv4 and IPv6 rules. + InetFamily Family = "inet" + + // ARPFamily represents the "arp" nftables family, for ARP rules. + ARPFamily Family = "arp" + + // BridgeFamily represents the "bridge" nftables family, for rules operating + // on packets traversing a bridge. + BridgeFamily Family = "bridge" + + // NetDevFamily represents the "netdev" nftables family, for rules operating on + // the device ingress/egress path. + NetDevFamily Family = "netdev" +) + +// TableFlag represents a table flag +type TableFlag string + +const ( + // DormantFlag indicates that a table is not currently evaluated. (Its base chains + // are unregistered.) + DormantFlag TableFlag = "dormant" +) + +// Table represents an nftables table. +type Table struct { + // Family is the nftables family of the table. You do not normally need to fill + // this in because it will be filled in for you automatically from the Interface. + Family Family + + // Name is the name of the table. 
You do not normally need to fill this in + // because it will be filled in for you automatically from the Interface. + Name string + + // Comment is an optional comment for the table. (Requires kernel >= 5.10 and + // nft >= 0.9.7; otherwise this field will be silently ignored. Requires + // nft >= 1.0.8 to include comments in List() results.) + Comment *string + + // Flags are the table flags + Flags []TableFlag + + // Handle is an identifier that can be used to uniquely identify an object when + // deleting it. When adding a new object, this must be nil. + Handle *int +} + +// BaseChainType represents the "type" of a "base chain" (ie, a chain that is attached to a hook). +// See https://wiki.nftables.org/wiki-nftables/index.php/Configuring_chains#Base_chain_types +type BaseChainType string + +const ( + // FilterType is the chain type for basic packet filtering. + FilterType BaseChainType = "filter" + + // NATType is the chain type for doing DNAT, SNAT, and masquerading. + // NAT operations are only available from certain hooks. + NATType BaseChainType = "nat" + + // RouteType is the chain type for rules that change the routing of packets. + // Chains of this type can only be added to the "output" hook. + RouteType BaseChainType = "route" +) + +// BaseChainHook represents the "hook" that a base chain is attached to. +// See https://wiki.nftables.org/wiki-nftables/index.php/Configuring_chains#Base_chain_hooks +// and https://wiki.nftables.org/wiki-nftables/index.php/Netfilter_hooks +type BaseChainHook string + +const ( + // PreroutingHook is the "prerouting" stage of packet processing, which is the + // first stage (after "ingress") for inbound ("input path" and "forward path") + // packets. + PreroutingHook BaseChainHook = "prerouting" + + // InputHook is the "input" stage of packet processing, which happens after + // "prerouting" for inbound packets being delivered to an interface on this host, + // in this network namespace. 
+ InputHook BaseChainHook = "input" + + // ForwardHook is the "forward" stage of packet processing, which happens after + // "prerouting" for inbound packets destined for a non-local IP (i.e. on another + // host or in another network namespace) + ForwardHook BaseChainHook = "forward" + + // OutputHook is the "output" stage of packet processing, which is the first stage + // for outbound packets, regardless of their final destination. + OutputHook BaseChainHook = "output" + + // PostroutingHook is the "postrouting" stage of packet processing, which is the + // final stage (before "egress") for outbound ("forward path" and "output path") + // packets. + PostroutingHook BaseChainHook = "postrouting" + + // IngressHook is the "ingress" stage of packet processing, in the "netdev" family + // or (with kernel >= 5.10 and nft >= 0.9.7) the "inet" family. + IngressHook BaseChainHook = "ingress" + + // EgressHook is the "egress" stage of packet processing, in the "netdev" family + // (with kernel >= 5.16 and nft >= 1.0.1). + EgressHook BaseChainHook = "egress" +) + +// BaseChainPriority represents the "priority" of a base chain. Lower values run earlier. +// See https://wiki.nftables.org/wiki-nftables/index.php/Configuring_chains#Base_chain_priority +// and https://wiki.nftables.org/wiki-nftables/index.php/Netfilter_hooks#Priority_within_hook +// +// In addition to the const values, you can also use a signed integer value, or an +// arithmetic expression consisting of a const value followed by "+" or "-" and an +// integer. +type BaseChainPriority string + +const ( + // RawPriority is the earliest named priority. In particular, it can be used for + // rules that need to run before conntrack. It is equivalent to the value -300 and + // can be used in the ip, ip6, and inet families. + RawPriority BaseChainPriority = "raw" + + // ManglePriority is the standard priority for packet-rewriting operations. 
It is + // equivalent to the value -150 and can be used in the ip, ip6, and inet families. + ManglePriority BaseChainPriority = "mangle" + + // DNATPriority is the standard priority for DNAT operations. In the ip, ip6, and + // inet families, it is equivalent to the value -100. In the bridge family it is + // equivalent to the value -300. In both cases it can only be used from the + // prerouting hook. + DNATPriority BaseChainPriority = "dstnat" + + // FilterPriority is the standard priority for filtering operations. In the ip, + // ip6, inet, arp, and netdev families, it is equivalent to the value 0. In the + // bridge family it is equivalent to the value -200. + FilterPriority BaseChainPriority = "filter" + + // OutPriority is FIXME. It is equivalent to the value 300 and can only be used in + // the bridge family. + OutPriority BaseChainPriority = "out" + + // SecurityPriority is the standard priority for security operations ("where + // secmark can be set for example"). It is equivalent to the value 50 and can be + // used in the ip, ip6, and inet families. + SecurityPriority BaseChainPriority = "security" + + // SNATPriority is the standard priority for SNAT operations. In the ip, ip6, and + // inet families, it is equivalent to the value 100. In the bridge family it is + // equivalent to the value 300. In both cases it can only be used from the + // postrouting hook. + SNATPriority BaseChainPriority = "srcnat" +) + +// BaseChainPolicy sets what happens to packets not explicitly accepted or refused by a +// base chain. +type BaseChainPolicy string + +const ( + // AcceptPolicy, which is the default, accepts any unmatched packets (though, + // as with any other nftables chain, a later chain can drop or reject it). + AcceptPolicy BaseChainPolicy = "accept" + + // DropPolicy drops any unmatched packets. 
+ DropPolicy BaseChainPolicy = "drop" +) + +// Chain represents an nftables chain; either a "base chain" (if Type, Hook, and Priority +// are specified), or a "regular chain" (if they are not). +type Chain struct { + // Family is the nftables family of the chain's table. You do not normally need to + // fill this in because it will be filled in for you automatically from the + // Interface. + Family Family + + // Table is the name of the chain's table. You do not normally need to fill this + // in because it will be filled in for you automatically from the Interface. + Table string + + // Name is the name of the chain. + Name string + + // Type is the chain type; this must be set for a base chain and unset for a + // regular chain. + Type *BaseChainType + // Hook is the hook that the chain is connected to; this must be set for a base + // chain and unset for a regular chain. + Hook *BaseChainHook + // Priority is the chain priority; this must be set for a base chain and unset for + // a regular chain. You can call ParsePriority() to convert this to a number. + Priority *BaseChainPriority + + // Policy is the policy for packets not explicitly accepted or refused by a base + // chain. + Policy *BaseChainPolicy + + // Device is the network interface that the chain is attached to; this must be set + // for a base chain connected to the "ingress" or "egress" hooks, and unset for + // all other chains. + Device *string + + // Comment is an optional comment for the object. (Requires kernel >= 5.10 and + // nft >= 0.9.7; otherwise this field will be silently ignored. Requires + // nft >= 1.0.8 to include comments in List() results.) + Comment *string + + // Handle is an identifier that can be used to uniquely identify an object when + // deleting it. When adding a new object, this must be nil + Handle *int +} + +// Rule represents a rule in a chain +type Rule struct { + // Family is the nftables family of the rule's table. 
You do not normally need to + // fill this in because it will be filled in for you automatically from the + // Interface. + Family Family + + // Table is the name of the rule's table. You do not normally need to fill this + // in because it will be filled in for you automatically from the Interface. + Table string + + // Chain is the name of the chain that contains this rule + Chain string + + // Rule is the rule in standard nftables syntax. (Should be empty on Delete, but + // is ignored if not.) Note that this does not include any rule comment, which is + // separate from the rule itself. + Rule string + + // Comment is an optional comment for the rule. + Comment *string + + // Index is the number of a rule (counting from 0) to Add this Rule after or + // Insert it before. Cannot be specified along with Handle. If neither Index + // nor Handle is specified then Add appends the rule the end of the chain and + // Insert prepends it to the beginning. + Index *int + + // Handle is a rule handle. In Add or Insert, if set, this is the handle of + // existing rule to put the new rule after/before. In Delete or Replace, this + // indicates the existing rule to delete/replace, and is mandatory. In the result + // of a List, this will indicate the rule's handle that can then be used in a + // later operation. + Handle *int +} + +// SetFlag represents a set or map flag +type SetFlag string + +const ( + // ConstantFlag is a flag indicating that the set/map is constant. FIXME UNDOCUMENTED + ConstantFlag SetFlag = "constant" + + // DynamicFlag is a flag indicating that the set contains stateful objects + // (counters, quotas, or limits) that will be dynamically updated. + DynamicFlag SetFlag = "dynamic" + + // IntervalFlag is a flag indicating that the set contains either CIDR elements or + // IP ranges. + IntervalFlag SetFlag = "interval" + + // TimeoutFlag is a flag indicating that the set/map has a timeout after which + // dynamically added elements will be removed. 
(It is set automatically if the + // set/map has a Timeout.) + TimeoutFlag SetFlag = "timeout" +) + +// SetPolicy represents a set or map storage policy +type SetPolicy string + +const ( + // PolicyPerformance FIXME + PerformancePolicy SetPolicy = "performance" + + // PolicyMemory FIXME + MemoryPolicy SetPolicy = "memory" +) + +// Set represents the definition of an nftables set (but not its elements) +type Set struct { + // Family is the nftables family of the set's table. You do not normally need to + // fill this in because it will be filled in for you automatically from the + // Interface. + Family Family + + // Table is the name of the set's table. You do not normally need to fill this + // in because it will be filled in for you automatically from the Interface. + Table string + + // Name is the name of the set. + Name string + + // Type is the type of the set key (eg "ipv4_addr"). Either Type or TypeOf, but + // not both, must be non-empty. + Type string + + // TypeOf is the type of the set key as an nftables expression (eg "ip saddr"). + // Either Type or TypeOf, but not both, must be non-empty. (Requires at least nft + // 0.9.4, and newer than that for some types.) + TypeOf string + + // Flags are the set flags + Flags []SetFlag + + // Timeout is the time that an element will stay in the set before being removed. + // (Optional; mandatory for sets that will be added to from the packet path) + Timeout *time.Duration + + // GCInterval is the interval at which timed-out elements will be removed from the + // set. (Optional; FIXME DEFAULT) + GCInterval *time.Duration + + // Size if the maximum numer of elements in the set. + // (Optional; mandatory for sets that will be added to from the packet path) + Size *uint64 + + // Policy is the FIXME + Policy *SetPolicy + + // AutoMerge indicates that adjacent/overlapping set elements should be merged + // together (only for interval sets) + AutoMerge *bool + + // Comment is an optional comment for the object. 
(Requires kernel >= 5.10 and + // nft >= 0.9.7; otherwise this field will be silently ignored.) + Comment *string + + // Handle is an identifier that can be used to uniquely identify an object when + // deleting it. When adding a new object, this must be nil + Handle *int +} + +// Map represents the definition of an nftables map (but not its elements) +type Map struct { + // Family is the nftables family of the map's table. You do not normally need to + // fill this in because it will be filled in for you automatically from the + // Interface. + Family Family + + // Table is the name of the map's table. You do not normally need to fill this + // in because it will be filled in for you automatically from the Interface. + Table string + + // Name is the name of the map. + Name string + + // Type is the type of the map key and value (eg "ipv4_addr : verdict"). Either + // Type or TypeOf, but not both, must be non-empty. + Type string + + // TypeOf is the type of the set key as an nftables expression (eg "ip saddr : verdict"). + // Either Type or TypeOf, but not both, must be non-empty. (Requires at least nft 0.9.4, + // and newer than that for some types.) + TypeOf string + + // Flags are the map flags + Flags []SetFlag + + // Timeout is the time that an element will stay in the set before being removed. + // (Optional; mandatory for sets that will be added to from the packet path) + Timeout *time.Duration + + // GCInterval is the interval at which timed-out elements will be removed from the + // set. (Optional; FIXME DEFAULT) + GCInterval *time.Duration + + // Size if the maximum numer of elements in the set. + // (Optional; mandatory for sets that will be added to from the packet path) + Size *uint64 + + // Policy is the FIXME + Policy *SetPolicy + + // Comment is an optional comment for the object. (Requires kernel >= 5.10 and + // nft >= 0.9.7; otherwise this field will be silently ignored.) 
+ Comment *string + + // Handle is an identifier that can be used to uniquely identify an object when + // deleting it. When adding a new object, this must be nil + Handle *int +} + +// Element represents a set or map element +type Element struct { + // Family is the nftables family of the element's table. You do not normally need + // to fill this in because it will be filled in for you automatically from the + // Interface. + Family Family + + // Table is the name of the element's table. You do not normally need to fill this + // in because it will be filled in for you automatically from the Interface. + Table string + + // Set is the name of the set that contains this element (or the empty string if + // this is a map element.) + Set string + + // Map is the name of the map that contains this element (or the empty string if + // this is a set element.) + Map string + + // Key is the element key. (The list contains a single element for "simple" keys, + // or multiple elements for concatenations.) + Key []string + + // Value is the map element value. As with Key, this may be a single value or + // multiple. For set elements, this must be nil. + Value []string + + // Comment is an optional comment for the element + Comment *string +} + +type FlowtableIngressPriority string + +const ( + // FilterIngressPriority is the priority for the filter value in the Ingress hook + // that stands for 0. + FilterIngressPriority FlowtableIngressPriority = "filter" +) + +// Flowtable represents an nftables flowtable. +// https://wiki.nftables.org/wiki-nftables/index.php/Flowtables +type Flowtable struct { + // Family is the nftables family of the flowtable's table. You do not normally + // need to fill this in because it will be filled in for you automatically from + // the Interface. + Family Family + + // Table is the name of the flowtable's table. You do not normally need to fill + // this in because it will be filled in for you automatically from the Interface. 
+ Table string + + // Name is the name of the flowtable. + Name string + + // The Priority can be a signed integer or FlowtableIngressPriority which stands for 0. + // Addition and subtraction can be used to set relative priority, e.g. filter + 5 equals to 5. + Priority *FlowtableIngressPriority + + // The Devices are specified as iifname(s) of the input interface(s) of the traffic + // that should be offloaded. + Devices []string + + // Handle is an identifier that can be used to uniquely identify an object when + // deleting it. When adding a new object, this must be nil + Handle *int +} + +// Counter represents named counter +type Counter struct { + // Family is the nftables family of the counter's table. You do not normally + // need to fill this in because it will be filled in for you automatically from + // the Interface. + Family Family + + // Table is the name of the counter's table. You do not normally need to fill + // this in because it will be filled in for you automatically from the Interface. + Table string + + // Name is the name of the named counter + Name string + + // Comment is an optional comment for the counter + Comment *string + + // Packets represents numbers of packets tracked by the counter. + // This will be filled in by ListCounters() but can be nil when creating new counter. + Packets *uint64 + + // Bytes represents numbers of bytes tracked by the counter. + // This will be filled in by ListCounters() but can be nil when creating new counter. + Bytes *uint64 + + // Handle is an identifier that can be used to uniquely identify an object when + // deleting it. When adding a new object, this must be nil + Handle *int +} diff --git a/vendor/sigs.k8s.io/knftables/util.go b/vendor/sigs.k8s.io/knftables/util.go new file mode 100644 index 0000000000..4ff14af246 --- /dev/null +++ b/vendor/sigs.k8s.io/knftables/util.go @@ -0,0 +1,117 @@ +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package knftables + +import ( + "fmt" + "strconv" + "strings" +) + +// PtrTo can be used to fill in optional field values in objects +func PtrTo[T any](val T) *T { + return &val +} + +var numericPriorities = map[string]int{ + "raw": -300, + "mangle": -150, + "dstnat": -100, + "filter": 0, + "security": 50, + "srcnat": 100, +} + +var bridgeNumericPriorities = map[string]int{ + "dstnat": -300, + "filter": -200, + "out": 100, + "srcnat": 300, +} + +// ParsePriority tries to convert the string form of a chain priority into a number +func ParsePriority(family Family, priority string) (int, error) { + val, err := strconv.Atoi(priority) + if err == nil { + return val, nil + } + + modVal := 0 + if i := strings.IndexAny(priority, "+-"); i != -1 { + mod := priority[i:] + modVal, err = strconv.Atoi(mod) + if err != nil { + return 0, fmt.Errorf("could not parse modifier %q: %w", mod, err) + } + priority = priority[:i] + } + + var found bool + if family == BridgeFamily { + val, found = bridgeNumericPriorities[priority] + } else { + val, found = numericPriorities[priority] + } + if !found { + return 0, fmt.Errorf("unknown priority %q", priority) + } + + return val + modVal, nil +} + +// Concat is a helper (primarily) for constructing Rule objects. It takes a series of +// arguments and concatenates them together into a single string with spaces between the +// arguments. 
Strings are output as-is, string arrays are output element by element, +// numbers are output as with `fmt.Sprintf("%d")`, and all other types are output as with +// `fmt.Sprintf("%s")`. To help with set/map lookup syntax, an argument of "@" will not +// be followed by a space, so you can do, eg, `Concat("ip saddr", "@", setName)`. +func Concat(args ...interface{}) string { + b := &strings.Builder{} + var needSpace, wroteAt bool + for _, arg := range args { + switch x := arg.(type) { + case string: + if needSpace { + b.WriteByte(' ') + } + b.WriteString(x) + wroteAt = (x == "@") + case []string: + for _, s := range x { + if needSpace { + b.WriteByte(' ') + } + b.WriteString(s) + wroteAt = (s == "@") + needSpace = b.Len() > 0 && !wroteAt + } + case int, uint, int16, uint16, int32, uint32, int64, uint64: + if needSpace { + b.WriteByte(' ') + } + fmt.Fprintf(b, "%d", x) + default: + if needSpace { + b.WriteByte(' ') + } + fmt.Fprintf(b, "%s", x) + } + + needSpace = b.Len() > 0 && !wroteAt + } + return b.String() +}